2024-12-09 01:48:58,065 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-12-09 01:48:58,082 main DEBUG Took 0.014276 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 01:48:58,083 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 01:48:58,083 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 01:48:58,085 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 01:48:58,086 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,094 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 01:48:58,109 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,111 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,112 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,112 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,113 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,113 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,114 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,115 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,115 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,116 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,117 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,118 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,118 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,119 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 01:48:58,120 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,120 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,121 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,121 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,122 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,122 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,123 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,123 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,124 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,124 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 01:48:58,125 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,126 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 01:48:58,128 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 01:48:58,130 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 01:48:58,132 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 01:48:58,133 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 01:48:58,134 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-09 01:48:58,135 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-09 01:48:58,148 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-09 01:48:58,152 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-09 01:48:58,154 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-09 01:48:58,155 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-09 01:48:58,156 main DEBUG createAppenders(={Console})
2024-12-09 01:48:58,157 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized
2024-12-09 01:48:58,157 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca
2024-12-09 01:48:58,157 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK.
2024-12-09 01:48:58,158 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-09 01:48:58,159 main DEBUG OutputStream closed
2024-12-09 01:48:58,159 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-09 01:48:58,160 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-09 01:48:58,160 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK
2024-12-09 01:48:58,260 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-09 01:48:58,263 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-09 01:48:58,265 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-09 01:48:58,266 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-09 01:48:58,267 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-09 01:48:58,267 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-09 01:48:58,268 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-09 01:48:58,268 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-09 01:48:58,269 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-09 01:48:58,269 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-09 01:48:58,270 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-09 01:48:58,270 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-09 01:48:58,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-09 01:48:58,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-09 01:48:58,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-09 01:48:58,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-09 01:48:58,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-09 01:48:58,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-09 01:48:58,277 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-09 01:48:58,277 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null
2024-12-09 01:48:58,278 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-09 01:48:58,279 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK.
2024-12-09T01:48:58,626 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552
2024-12-09 01:48:58,630 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-09 01:48:58,631 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-09T01:48:58,645 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-12-09T01:48:58,684 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=393, ProcessCount=11, AvailableMemoryMB=12331 2024-12-09T01:48:58,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T01:48:58,701 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331, deleteOnExit=true 2024-12-09T01:48:58,701 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T01:48:58,702 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/test.cache.data in system properties and HBase conf 2024-12-09T01:48:58,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T01:48:58,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.log.dir in system properties and HBase conf 2024-12-09T01:48:58,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T01:48:58,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T01:48:58,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T01:48:58,807 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T01:48:58,912 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T01:48:58,916 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:48:58,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:48:58,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T01:48:58,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:48:58,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T01:48:58,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T01:48:58,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:48:58,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:48:58,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T01:48:58,920 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/nfs.dump.dir in system properties and HBase conf 2024-12-09T01:48:58,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/java.io.tmpdir in system properties and HBase conf 2024-12-09T01:48:58,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:48:58,921 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T01:48:58,922 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T01:48:59,447 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:48:59,812 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T01:48:59,892 INFO [Time-limited test {}] log.Log(170): Logging initialized @2628ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T01:48:59,970 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:49:00,031 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:49:00,051 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:49:00,051 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:49:00,053 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:49:00,065 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:49:00,068 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:49:00,069 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:49:00,288 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/java.io.tmpdir/jetty-localhost-36993-hadoop-hdfs-3_4_1-tests_jar-_-any-1265680866844279598/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:49:00,295 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:36993} 2024-12-09T01:49:00,296 INFO [Time-limited test {}] server.Server(415): Started @3033ms 2024-12-09T01:49:00,322 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:49:00,704 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:49:00,712 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:49:00,715 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:49:00,715 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:49:00,716 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:49:00,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:49:00,718 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:49:00,851 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/java.io.tmpdir/jetty-localhost-44889-hadoop-hdfs-3_4_1-tests_jar-_-any-482715330703371302/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:49:00,852 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:44889} 2024-12-09T01:49:00,852 INFO [Time-limited test {}] server.Server(415): Started @3590ms 2024-12-09T01:49:00,913 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:49:01,061 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:49:01,068 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:49:01,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:49:01,083 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:49:01,084 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:49:01,085 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:49:01,086 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:49:01,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/java.io.tmpdir/jetty-localhost-36581-hadoop-hdfs-3_4_1-tests_jar-_-any-10048641573432555334/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:49:01,245 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:36581} 2024-12-09T01:49:01,245 INFO [Time-limited test {}] server.Server(415): Started @3982ms 2024-12-09T01:49:01,248 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T01:49:01,404 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/data/data1/current/BP-29672646-172.17.0.2-1733708939553/current, will proceed with Du for space computation calculation,
2024-12-09T01:49:01,404 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/data/data2/current/BP-29672646-172.17.0.2-1733708939553/current, will proceed with Du for space computation calculation,
2024-12-09T01:49:01,404 WARN [Thread-95 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/data/data3/current/BP-29672646-172.17.0.2-1733708939553/current, will proceed with Du for space computation calculation,
2024-12-09T01:49:01,405 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/data/data4/current/BP-29672646-172.17.0.2-1733708939553/current, will proceed with Du for space computation calculation,
2024-12-09T01:49:01,482 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T01:49:01,487 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-09T01:49:01,559 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x443ec87da5601e21 with lease ID 0x5fc0abc95ea22916: Processing first storage report for DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e from datanode DatanodeRegistration(127.0.0.1:41677, datanodeUuid=78149fd4-bedc-4bb1-be68-887f779a4dde, infoPort=45761, infoSecurePort=0, ipcPort=34737, storageInfo=lv=-57;cid=testClusterID;nsid=1057601906;c=1733708939553)
2024-12-09T01:49:01,560 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x443ec87da5601e21 with lease ID 0x5fc0abc95ea22916: from storage DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e node DatanodeRegistration(127.0.0.1:41677, datanodeUuid=78149fd4-bedc-4bb1-be68-887f779a4dde, infoPort=45761, infoSecurePort=0, ipcPort=34737, storageInfo=lv=-57;cid=testClusterID;nsid=1057601906;c=1733708939553), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-09T01:49:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41b503a9aba883d6 with lease ID 0x5fc0abc95ea22915: Processing first storage report for DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4 from datanode DatanodeRegistration(127.0.0.1:44689, datanodeUuid=4e0fe31b-1c5e-4376-96c7-f63f7fc8806c, infoPort=37463, infoSecurePort=0, ipcPort=42121, storageInfo=lv=-57;cid=testClusterID;nsid=1057601906;c=1733708939553)
2024-12-09T01:49:01,561 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41b503a9aba883d6 with lease ID 0x5fc0abc95ea22915: from storage DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4 node DatanodeRegistration(127.0.0.1:44689, datanodeUuid=4e0fe31b-1c5e-4376-96c7-f63f7fc8806c, infoPort=37463, infoSecurePort=0, ipcPort=42121, storageInfo=lv=-57;cid=testClusterID;nsid=1057601906;c=1733708939553), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T01:49:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x443ec87da5601e21 with lease ID 0x5fc0abc95ea22916: Processing first storage report for DS-600d41ca-fead-4836-bcfd-f72abf6dae96 from datanode DatanodeRegistration(127.0.0.1:41677, datanodeUuid=78149fd4-bedc-4bb1-be68-887f779a4dde, infoPort=45761, infoSecurePort=0, ipcPort=34737, storageInfo=lv=-57;cid=testClusterID;nsid=1057601906;c=1733708939553)
2024-12-09T01:49:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x443ec87da5601e21 with lease ID 0x5fc0abc95ea22916: from storage DS-600d41ca-fead-4836-bcfd-f72abf6dae96 node DatanodeRegistration(127.0.0.1:41677, datanodeUuid=78149fd4-bedc-4bb1-be68-887f779a4dde, infoPort=45761, infoSecurePort=0, ipcPort=34737, storageInfo=lv=-57;cid=testClusterID;nsid=1057601906;c=1733708939553), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-09T01:49:01,562 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x41b503a9aba883d6 with lease ID 0x5fc0abc95ea22915: Processing first storage report for DS-84627d82-ea0f-4998-b187-be90ea13a6b8 from datanode DatanodeRegistration(127.0.0.1:44689, datanodeUuid=4e0fe31b-1c5e-4376-96c7-f63f7fc8806c, infoPort=37463, infoSecurePort=0, ipcPort=42121, storageInfo=lv=-57;cid=testClusterID;nsid=1057601906;c=1733708939553)
2024-12-09T01:49:01,563 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x41b503a9aba883d6 with lease ID 0x5fc0abc95ea22915: from storage DS-84627d82-ea0f-4998-b187-be90ea13a6b8 node DatanodeRegistration(127.0.0.1:44689, datanodeUuid=4e0fe31b-1c5e-4376-96c7-f63f7fc8806c, infoPort=37463, infoSecurePort=0, ipcPort=42121, storageInfo=lv=-57;cid=testClusterID;nsid=1057601906;c=1733708939553), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-09T01:49:01,694 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552
2024-12-09T01:49:01,790 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/zookeeper_0, clientPort=54870, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T01:49:01,802 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54870
2024-12-09T01:49:01,818 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T01:49:01,823 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T01:49:02,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741825_1001 (size=7)
2024-12-09T01:49:02,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741825_1001 (size=7)
2024-12-09T01:49:02,464 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1 with version=8
2024-12-09T01:49:02,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/hbase-staging
2024-12-09T01:49:02,562 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-09T01:49:02,830 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef6f18c58dc9:0 server-side Connection retries=45
2024-12-09T01:49:02,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T01:49:02,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T01:49:02,851 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T01:49:02,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T01:49:02,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T01:49:03,002 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T01:49:03,063 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-09T01:49:03,072 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-09T01:49:03,077 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-09T01:49:03,105 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 1219 (auto-detected)
2024-12-09T01:49:03,106 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-09T01:49:03,132 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36653
2024-12-09T01:49:03,157 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36653 connecting to ZooKeeper ensemble=127.0.0.1:54870
2024-12-09T01:49:03,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:366530x0, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-09T01:49:03,193 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36653-0x1007476c2af0000 connected
2024-12-09T01:49:03,224 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T01:49:03,226 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T01:49:03,237 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-09T01:49:03,241 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1, hbase.cluster.distributed=false
2024-12-09T01:49:03,264 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-09T01:49:03,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36653
2024-12-09T01:49:03,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36653 2024-12-09T01:49:03,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36653 2024-12-09T01:49:03,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36653 2024-12-09T01:49:03,271 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36653 2024-12-09T01:49:03,391 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:49:03,393 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:49:03,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:49:03,394 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:49:03,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:49:03,394 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:49:03,398 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:49:03,401 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:49:03,402 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43333 2024-12-09T01:49:03,404 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43333 connecting to ZooKeeper ensemble=127.0.0.1:54870 2024-12-09T01:49:03,405 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:49:03,410 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:49:03,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433330x0, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:49:03,419 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:433330x0, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:49:03,419 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:43333-0x1007476c2af0001 connected 2024-12-09T01:49:03,425 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:49:03,433 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:49:03,436 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:49:03,442 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:49:03,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43333 2024-12-09T01:49:03,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43333 2024-12-09T01:49:03,444 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43333 2024-12-09T01:49:03,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43333 2024-12-09T01:49:03,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43333 2024-12-09T01:49:03,466 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef6f18c58dc9:36653 2024-12-09T01:49:03,467 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef6f18c58dc9,36653,1733708942619 2024-12-09T01:49:03,474 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:49:03,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:49:03,477 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef6f18c58dc9,36653,1733708942619 2024-12-09T01:49:03,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:49:03,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:03,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:03,500 DEBUG 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:49:03,501 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef6f18c58dc9,36653,1733708942619 from backup master directory 2024-12-09T01:49:03,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:49:03,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef6f18c58dc9,36653,1733708942619 2024-12-09T01:49:03,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:49:03,505 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:49:03,505 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef6f18c58dc9,36653,1733708942619 2024-12-09T01:49:03,507 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T01:49:03,508 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T01:49:03,566 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/hbase.id] with ID: cbb2811c-8e4a-489d-95a1-fc57616a72e5 2024-12-09T01:49:03,566 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/.tmp/hbase.id 2024-12-09T01:49:03,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:49:03,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:49:03,580 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/.tmp/hbase.id]:[hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/hbase.id] 2024-12-09T01:49:03,629 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:49:03,636 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-12-09T01:49:03,658 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 20ms. 2024-12-09T01:49:03,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:03,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:03,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:49:03,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:49:03,697 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:49:03,699 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T01:49:03,706 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:49:03,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:49:03,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:49:03,760 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store 2024-12-09T01:49:03,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:49:03,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:49:03,787 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T01:49:03,790 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:49:03,791 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:49:03,791 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:49:03,791 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:49:03,793 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 1 ms 2024-12-09T01:49:03,793 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:49:03,793 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T01:49:03,794 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733708943791Disabling compacts and flushes for region at 1733708943791Disabling writes for close at 1733708943793 (+2 ms)Writing region close event to WAL at 1733708943793Closed at 1733708943793 2024-12-09T01:49:03,797 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/.initializing 2024-12-09T01:49:03,797 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/WALs/ef6f18c58dc9,36653,1733708942619 2024-12-09T01:49:03,819 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C36653%2C1733708942619, suffix=, logDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/WALs/ef6f18c58dc9,36653,1733708942619, archiveDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/oldWALs, maxLogs=10 2024-12-09T01:49:03,829 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C36653%2C1733708942619.1733708943825 2024-12-09T01:49:03,849 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/WALs/ef6f18c58dc9,36653,1733708942619/ef6f18c58dc9%2C36653%2C1733708942619.1733708943825 2024-12-09T01:49:03,858 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45761:45761),(127.0.0.1/127.0.0.1:37463:37463)] 2024-12-09T01:49:03,859 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:49:03,860 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:49:03,863 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,864 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,902 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,929 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T01:49:03,933 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:03,936 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:49:03,937 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,939 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T01:49:03,940 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:03,941 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:49:03,941 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,943 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T01:49:03,944 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:03,945 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:49:03,945 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,947 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T01:49:03,948 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:03,948 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:49:03,949 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,952 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,954 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,958 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,959 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,962 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T01:49:03,966 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:49:03,971 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:49:03,973 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807195, jitterRate=0.026401787996292114}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:49:03,979 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733708943877Initializing all the Stores at 1733708943879 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733708943879Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733708943880 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733708943880Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733708943880Cleaning up temporary data from old regions at 1733708943959 (+79 ms)Region opened successfully at 1733708943979 (+20 ms) 2024-12-09T01:49:03,981 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T01:49:04,018 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33cdd114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:49:04,051 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T01:49:04,064 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T01:49:04,064 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T01:49:04,067 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T01:49:04,069 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T01:49:04,074 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-09T01:49:04,074 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T01:49:04,102 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T01:49:04,111 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T01:49:04,114 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T01:49:04,116 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T01:49:04,118 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T01:49:04,120 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T01:49:04,122 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T01:49:04,126 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T01:49:04,129 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T01:49:04,130 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T01:49:04,132 DEBUG 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T01:49:04,148 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T01:49:04,150 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T01:49:04,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:49:04,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:49:04,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:04,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:04,157 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef6f18c58dc9,36653,1733708942619, sessionid=0x1007476c2af0000, setting cluster-up flag (Was=false) 2024-12-09T01:49:04,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:04,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:04,176 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T01:49:04,178 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,36653,1733708942619 2024-12-09T01:49:04,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:04,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:04,190 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T01:49:04,192 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,36653,1733708942619 2024-12-09T01:49:04,198 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T01:49:04,255 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(746): ClusterId : cbb2811c-8e4a-489d-95a1-fc57616a72e5 2024-12-09T01:49:04,258 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:49:04,263 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:49:04,263 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:49:04,267 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:49:04,267 DEBUG [RS:0;ef6f18c58dc9:43333 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4020c6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:49:04,273 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T01:49:04,283 DEBUG [RS:0;ef6f18c58dc9:43333 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef6f18c58dc9:43333 2024-12-09T01:49:04,283 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T01:49:04,286 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:49:04,286 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:49:04,286 DEBUG [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T01:49:04,289 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,36653,1733708942619 with port=43333, startcode=1733708943349 2024-12-09T01:49:04,291 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-09T01:49:04,299 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef6f18c58dc9,36653,1733708942619 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T01:49:04,304 DEBUG [RS:0;ef6f18c58dc9:43333 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:49:04,307 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:49:04,308 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:49:04,308 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:49:04,308 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:49:04,308 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef6f18c58dc9:0, corePoolSize=10, maxPoolSize=10 2024-12-09T01:49:04,308 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,309 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:49:04,309 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,319 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733708974319 2024-12-09T01:49:04,321 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T01:49:04,321 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:49:04,322 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T01:49:04,322 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T01:49:04,326 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T01:49:04,327 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T01:49:04,327 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T01:49:04,327 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T01:49:04,330 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:04,331 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T01:49:04,333 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T01:49:04,345 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T01:49:04,346 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T01:49:04,347 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T01:49:04,351 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T01:49:04,352 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T01:49:04,354 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733708944353,5,FailOnTimeoutGroup] 2024-12-09T01:49:04,356 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733708944354,5,FailOnTimeoutGroup] 2024-12-09T01:49:04,356 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,356 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T01:49:04,357 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,358 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T01:49:04,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:49:04,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:49:04,362 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T01:49:04,363 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1 2024-12-09T01:49:04,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:49:04,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:49:04,390 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:49:04,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:49:04,400 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:49:04,400 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:04,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:49:04,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:49:04,402 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47895, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:49:04,404 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:49:04,404 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:04,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:49:04,405 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:49:04,408 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction 
policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:49:04,408 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:04,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:49:04,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:49:04,410 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36653 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:04,412 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:49:04,412 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:04,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:49:04,413 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36653 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:04,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:49:04,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740 2024-12-09T01:49:04,416 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740 2024-12-09T01:49:04,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:49:04,419 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:49:04,420 DEBUG [PEWorker-1 {}] 
regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T01:49:04,423 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:49:04,428 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:49:04,429 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=844886, jitterRate=0.07432927191257477}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:49:04,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733708944390Initializing all the Stores at 1733708944392 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733708944392Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733708944396 (+4 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733708944396Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733708944396Cleaning up temporary data from old regions at 1733708944419 (+23 ms)Region opened successfully at 1733708944432 (+13 ms) 2024-12-09T01:49:04,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:49:04,433 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:49:04,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:49:04,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:49:04,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:49:04,434 DEBUG [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1440): Config from master: 
hbase.rootdir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1 2024-12-09T01:49:04,434 DEBUG [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36525 2024-12-09T01:49:04,434 DEBUG [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:49:04,434 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:49:04,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733708944433Disabling compacts and flushes for region at 1733708944433Disabling writes for close at 1733708944433Writing region close event to WAL at 1733708944434 (+1 ms)Closed at 1733708944434 2024-12-09T01:49:04,439 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:49:04,439 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T01:49:04,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:49:04,441 DEBUG [RS:0;ef6f18c58dc9:43333 {}] zookeeper.ZKUtil(111): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:04,442 WARN [RS:0;ef6f18c58dc9:43333 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T01:49:04,442 INFO [RS:0;ef6f18c58dc9:43333 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:49:04,442 DEBUG [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:04,444 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,43333,1733708943349] 2024-12-09T01:49:04,447 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T01:49:04,456 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:49:04,459 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T01:49:04,476 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:49:04,493 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:49:04,499 INFO [RS:0;ef6f18c58dc9:43333 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:49:04,500 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,501 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:49:04,508 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:49:04,510 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T01:49:04,510 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,510 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,510 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,511 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,511 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,511 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:49:04,511 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,511 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,512 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,512 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,512 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,512 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:49:04,512 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:49:04,512 DEBUG [RS:0;ef6f18c58dc9:43333 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:49:04,514 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,514 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,514 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,514 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T01:49:04,514 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,515 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,43333,1733708943349-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:49:04,535 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:49:04,537 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,43333,1733708943349-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,537 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,537 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.Replication(171): ef6f18c58dc9,43333,1733708943349 started 2024-12-09T01:49:04,556 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:04,556 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,43333,1733708943349, RpcServer on ef6f18c58dc9/172.17.0.2:43333, sessionid=0x1007476c2af0001 2024-12-09T01:49:04,557 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:49:04,557 DEBUG [RS:0;ef6f18c58dc9:43333 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:04,557 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,43333,1733708943349' 2024-12-09T01:49:04,557 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:49:04,559 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:49:04,559 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:49:04,559 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:49:04,560 DEBUG [RS:0;ef6f18c58dc9:43333 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:04,560 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,43333,1733708943349' 2024-12-09T01:49:04,560 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:49:04,560 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:49:04,561 DEBUG [RS:0;ef6f18c58dc9:43333 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:49:04,561 INFO [RS:0;ef6f18c58dc9:43333 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:49:04,561 INFO [RS:0;ef6f18c58dc9:43333 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-09T01:49:04,610 WARN [ef6f18c58dc9:36653 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T01:49:04,672 INFO [RS:0;ef6f18c58dc9:43333 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C43333%2C1733708943349, suffix=, logDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349, archiveDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs, maxLogs=32 2024-12-09T01:49:04,675 INFO [RS:0;ef6f18c58dc9:43333 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.1733708944675 2024-12-09T01:49:04,688 INFO [RS:0;ef6f18c58dc9:43333 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708944675 2024-12-09T01:49:04,694 DEBUG [RS:0;ef6f18c58dc9:43333 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45761:45761),(127.0.0.1/127.0.0.1:37463:37463)] 2024-12-09T01:49:04,864 DEBUG [ef6f18c58dc9:36653 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T01:49:04,880 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:04,890 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,43333,1733708943349, state=OPENING 2024-12-09T01:49:04,896 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T01:49:04,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:04,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:49:04,900 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:49:04,900 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:49:04,902 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:49:04,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,43333,1733708943349}] 2024-12-09T01:49:05,083 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:49:05,087 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47851, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:49:05,105 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T01:49:05,106 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:49:05,111 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C43333%2C1733708943349.meta, suffix=.meta, logDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349, archiveDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs, maxLogs=32 2024-12-09T01:49:05,113 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.meta.1733708945113.meta 2024-12-09T01:49:05,134 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.meta.1733708945113.meta 2024-12-09T01:49:05,143 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37463:37463),(127.0.0.1/127.0.0.1:45761:45761)] 2024-12-09T01:49:05,148 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:49:05,151 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T01:49:05,154 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T01:49:05,163 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
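[editor's note] The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ... maxLogs=32" lines above come from standard region-server WAL settings. A minimal sketch of the configuration keys that usually drive those values; the values here simply mirror what the log reports, and nothing in this log shows them being set explicitly by the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; the roll size is blocksize * logroll.multiplier (256 MB * 0.5 = 128 MB above)
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on un-archived WAL files per region server ("maxLogs=32" above)
        conf.setInt("hbase.regionserver.maxlogs", 32);
        long rollSize = (long) (conf.getLong("hbase.regionserver.hlog.blocksize", 0)
            * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f));
        System.out.println("effective rollsize=" + rollSize);
      }
    }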
2024-12-09T01:49:05,170 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T01:49:05,170 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:49:05,171 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T01:49:05,171 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T01:49:05,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:49:05,177 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:49:05,178 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:05,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:49:05,179 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:49:05,181 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:49:05,181 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:05,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:49:05,182 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:49:05,184 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:49:05,184 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:05,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:49:05,185 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:49:05,187 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:49:05,187 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:05,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
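[editor's note] Each CompactionConfiguration(183) line above dumps the effective compaction settings for one column family of hbase:meta (info, ns, rep_barrier, table). A sketch of the configuration keys those numbers usually map to, assuming stock defaults; the log does not show any of them being overridden:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // files [minFilesToCompact:3, maxFilesToCompact:10)
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // ratio 1.200000 / off-peak ratio 5.000000
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        // major period 604800000 ms (7 days), major jitter 0.5
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.println("ratio=" + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
      }
    }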
2024-12-09T01:49:05,189 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:49:05,191 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740 2024-12-09T01:49:05,195 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740 2024-12-09T01:49:05,203 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:49:05,203 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:49:05,204 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T01:49:05,207 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:49:05,209 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=817746, jitterRate=0.03981901705265045}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:49:05,209 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T01:49:05,211 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733708945172Writing region info on filesystem at 1733708945172Initializing all the Stores at 1733708945174 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733708945175 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733708945175Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733708945175Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733708945175Cleaning up temporary data from old regions at 1733708945203 (+28 ms)Running coprocessor post-open hooks at 1733708945209 (+6 ms)Region opened successfully at 1733708945211 (+2 ms) 2024-12-09T01:49:05,218 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733708945073 2024-12-09T01:49:05,231 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T01:49:05,231 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T01:49:05,234 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:05,243 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,43333,1733708943349, state=OPEN 2024-12-09T01:49:05,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:49:05,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:49:05,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:49:05,252 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:49:05,253 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:05,262 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T01:49:05,262 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,43333,1733708943349 in 349 msec 2024-12-09T01:49:05,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T01:49:05,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 819 msec 2024-12-09T01:49:05,273 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:49:05,273 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T01:49:05,302 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:49:05,303 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,43333,1733708943349, seqNum=-1] 2024-12-09T01:49:05,332 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:49:05,334 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58927, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:49:05,358 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1290 sec 2024-12-09T01:49:05,358 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733708945358, completionTime=-1 2024-12-09T01:49:05,362 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T01:49:05,362 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T01:49:05,398 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T01:49:05,399 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733709005399 2024-12-09T01:49:05,399 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733709065399 2024-12-09T01:49:05,399 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 37 msec 2024-12-09T01:49:05,402 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36653,1733708942619-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:05,403 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36653,1733708942619-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:05,403 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36653,1733708942619-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:05,404 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef6f18c58dc9:36653, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T01:49:05,405 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:05,405 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T01:49:05,414 DEBUG [master/ef6f18c58dc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T01:49:05,439 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.934sec 2024-12-09T01:49:05,441 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T01:49:05,443 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T01:49:05,444 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T01:49:05,445 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T01:49:05,445 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T01:49:05,446 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36653,1733708942619-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:49:05,447 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36653,1733708942619-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T01:49:05,510 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T01:49:05,511 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T01:49:05,512 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36653,1733708942619-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
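[editor's note] The many "Chore ScheduledChore name=..., period=..., unit=... is enabled" lines are ChoreService registering periodic background tasks on the master and region server. A minimal, hypothetical chore to show the shape of that API; the chore name and 1000 ms period below are made up for illustration:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      // A trivial chore; real chores (BalancerChore, HbckChore, ...) subclass ScheduledChore the same way.
      static final class DemoChore extends ScheduledChore {
        DemoChore(Stoppable stopper) {
          super("DemoChore", stopper, 1000); // name, stopper, period in milliseconds
        }
        @Override
        protected void chore() {
          System.out.println("DemoChore fired"); // the periodic work
        }
      }

      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch"); // thread-name prefix
        service.scheduleChore(new DemoChore(stopper));     // logs "... is enabled." like the lines above
        Thread.sleep(3_000);                               // let it fire a few times
        stopper.stop("done");
        service.shutdown();
      }
    }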
2024-12-09T01:49:05,520 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64202c07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:49:05,523 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T01:49:05,524 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T01:49:05,528 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef6f18c58dc9,36653,-1 for getting cluster id 2024-12-09T01:49:05,531 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T01:49:05,551 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'cbb2811c-8e4a-489d-95a1-fc57616a72e5' 2024-12-09T01:49:05,554 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T01:49:05,554 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "cbb2811c-8e4a-489d-95a1-fc57616a72e5" 2024-12-09T01:49:05,556 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ce8903c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:49:05,556 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef6f18c58dc9,36653,-1] 2024-12-09T01:49:05,559 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T01:49:05,564 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:49:05,566 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46482, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T01:49:05,570 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@717eca08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:49:05,570 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:49:05,578 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,43333,1733708943349, seqNum=-1] 2024-12-09T01:49:05,579 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:49:05,581 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40764, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:49:05,608 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=ef6f18c58dc9,36653,1733708942619 2024-12-09T01:49:05,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:49:05,619 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T01:49:05,625 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T01:49:05,631 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is ef6f18c58dc9,36653,1733708942619 2024-12-09T01:49:05,635 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3ac11723 2024-12-09T01:49:05,637 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T01:49:05,643 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46486, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T01:49:05,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36653 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T01:49:05,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36653 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
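[editor's note] The two TableDescriptorChecker warnings above are expected for this test: the region max file size (786432 bytes) and memstore flush size (8192 bytes) are deliberately tiny so that flushes and log rolls happen quickly. A sketch of how a test would typically arrange that before starting the mini cluster; the HBaseTestingUtil constructor and start/stop calls are assumptions based on the HBaseTestingUtil lines elsewhere in this log, not something the log itself shows:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.HConstants;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong(HConstants.HREGION_MAX_FILESIZE, 786432L);      // provokes the MAX_FILESIZE warning
        conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192L); // provokes the MEMSTORE_FLUSHSIZE warning
        HBaseTestingUtil util = new HBaseTestingUtil(conf);          // assumed constructor taking a Configuration
        util.startMiniCluster();                                     // one master + one region server by default
        try {
          // ... exercise the cluster here ...
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }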
2024-12-09T01:49:05,650 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36653 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:49:05,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36653 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-12-09T01:49:05,666 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T01:49:05,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36653 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-12-09T01:49:05,669 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:05,673 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T01:49:05,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:49:05,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741835_1011 (size=389) 2024-12-09T01:49:05,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741835_1011 (size=389) 2024-12-09T01:49:05,728 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8c2a461bcd4182ee1839cc12c901c548, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1 2024-12-09T01:49:05,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741836_1012 (size=72) 2024-12-09T01:49:05,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741836_1012 (size=72) 2024-12-09T01:49:05,752 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:49:05,753 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 8c2a461bcd4182ee1839cc12c901c548, disabling compactions & flushes 2024-12-09T01:49:05,753 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 2024-12-09T01:49:05,753 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 2024-12-09T01:49:05,753 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. after waiting 0 ms 2024-12-09T01:49:05,753 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 2024-12-09T01:49:05,753 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 2024-12-09T01:49:05,753 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8c2a461bcd4182ee1839cc12c901c548: Waiting for close lock at 1733708945752Disabling compacts and flushes for region at 1733708945752Disabling writes for close at 1733708945753 (+1 ms)Writing region close event to WAL at 1733708945753Closed at 1733708945753 2024-12-09T01:49:05,757 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T01:49:05,764 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1733708945757"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733708945757"}]},"ts":"1733708945757"} 2024-12-09T01:49:05,771 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
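[editor's note] The HMaster$4(2454) line above shows the create-table request exactly as the master received it, including the full 'info' column-family descriptor. A client-side sketch that would produce an equivalent request; the descriptor mirrors what the log prints, and the connection setup is the generic one rather than whatever the test harness actually uses:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .build())
              .build();
          admin.createTable(desc); // drives the pid=4 CreateTableProcedure seen in the log
        }
      }
    }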
2024-12-09T01:49:05,773 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T01:49:05,777 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733708945774"}]},"ts":"1733708945774"} 2024-12-09T01:49:05,792 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-12-09T01:49:05,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=8c2a461bcd4182ee1839cc12c901c548, ASSIGN}] 2024-12-09T01:49:05,798 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=8c2a461bcd4182ee1839cc12c901c548, ASSIGN 2024-12-09T01:49:05,800 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=8c2a461bcd4182ee1839cc12c901c548, ASSIGN; state=OFFLINE, location=ef6f18c58dc9,43333,1733708943349; forceNewPlan=false, retain=false 2024-12-09T01:49:05,952 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8c2a461bcd4182ee1839cc12c901c548, regionState=OPENING, regionLocation=ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:05,957 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=8c2a461bcd4182ee1839cc12c901c548, ASSIGN because future has completed 2024-12-09T01:49:05,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8c2a461bcd4182ee1839cc12c901c548, server=ef6f18c58dc9,43333,1733708943349}] 2024-12-09T01:49:06,124 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 
2024-12-09T01:49:06,124 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8c2a461bcd4182ee1839cc12c901c548, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:49:06,125 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,125 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:49:06,125 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,125 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,129 INFO [StoreOpener-8c2a461bcd4182ee1839cc12c901c548-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,133 INFO [StoreOpener-8c2a461bcd4182ee1839cc12c901c548-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8c2a461bcd4182ee1839cc12c901c548 columnFamilyName info 2024-12-09T01:49:06,133 DEBUG [StoreOpener-8c2a461bcd4182ee1839cc12c901c548-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:49:06,135 INFO [StoreOpener-8c2a461bcd4182ee1839cc12c901c548-1 {}] regionserver.HStore(327): Store=8c2a461bcd4182ee1839cc12c901c548/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:49:06,135 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,137 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,137 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,138 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,138 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,142 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,159 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:49:06,161 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8c2a461bcd4182ee1839cc12c901c548; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=814318, jitterRate=0.03545890748500824}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T01:49:06,161 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:06,162 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8c2a461bcd4182ee1839cc12c901c548: Running coprocessor pre-open hook at 1733708946126Writing region info on filesystem at 1733708946126Initializing all the Stores at 1733708946128 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733708946128Cleaning up temporary data from old regions at 1733708946138 (+10 ms)Running coprocessor post-open hooks at 1733708946161 (+23 ms)Region opened successfully at 1733708946162 (+1 ms) 2024-12-09T01:49:06,165 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548., pid=6, masterSystemTime=1733708946113 2024-12-09T01:49:06,171 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 2024-12-09T01:49:06,171 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 2024-12-09T01:49:06,172 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8c2a461bcd4182ee1839cc12c901c548, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,43333,1733708943349 2024-12-09T01:49:06,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8c2a461bcd4182ee1839cc12c901c548, server=ef6f18c58dc9,43333,1733708943349 because future has completed 2024-12-09T01:49:06,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T01:49:06,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8c2a461bcd4182ee1839cc12c901c548, server=ef6f18c58dc9,43333,1733708943349 in 223 msec 2024-12-09T01:49:06,194 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T01:49:06,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=8c2a461bcd4182ee1839cc12c901c548, ASSIGN in 394 msec 2024-12-09T01:49:06,197 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T01:49:06,198 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733708946197"}]},"ts":"1733708946197"} 2024-12-09T01:49:06,203 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-12-09T01:49:06,206 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T01:49:06,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 554 msec 2024-12-09T01:49:10,674 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-09T01:49:10,728 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T01:49:10,730 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-12-09T01:49:13,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T01:49:13,061 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T01:49:13,063 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T01:49:13,063 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T01:49:13,065 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:49:13,065 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T01:49:13,066 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T01:49:13,066 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T01:49:15,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36653 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:49:15,767 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-12-09T01:49:15,770 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-12-09T01:49:15,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-12-09T01:49:15,779 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 
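[editor's note] Once CREATE completes (the RawAsyncHBaseAdmin line above), the test starts writing to the 'info' family; the flush later in this log reports seven cells of roughly 1 KB each, keyed row0001 onward. A sketch of writes plus an explicit WAL roll of the kind that produces the "Rolled WAL ... new WAL ..." lines; the row and value shapes are inferred from the log, and how the real test actually triggers its rolls is not shown here:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndRollSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn)) {
          for (int i = 1; i <= 7; i++) {                        // ~7 KB total, matching the later flush
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), new byte[1024]); // empty qualifier, 1 KB value
            table.put(put);
          }
          // Ask the (single) region server to roll its WAL, producing a new timestamped WAL file.
          ServerName rs = admin.getClusterMetrics().getLiveServerMetrics().keySet().iterator().next();
          admin.rollWALWriter(rs);
        }
      }
    }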
2024-12-09T01:49:15,780 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.1733708955780 2024-12-09T01:49:15,799 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:15,799 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:15,800 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:15,800 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:15,800 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:15,801 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708944675 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708955780 2024-12-09T01:49:15,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741833_1009 (size=451) 2024-12-09T01:49:15,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741833_1009 (size=451) 2024-12-09T01:49:15,825 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708944675 to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs/ef6f18c58dc9%2C43333%2C1733708943349.1733708944675 2024-12-09T01:49:15,827 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45761:45761),(127.0.0.1/127.0.0.1:37463:37463)] 2024-12-09T01:49:15,838 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548., hostname=ef6f18c58dc9,43333,1733708943349, seqNum=2] 2024-12-09T01:49:27,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43333 {}] regionserver.HRegion(8855): Flush requested on 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:27,880 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8c2a461bcd4182ee1839cc12c901c548 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T01:49:27,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/98bfc6d9d4df41e0ba4a0acd53faba6a is 1080, key is row0001/info:/1733708955841/Put/seqid=0 2024-12-09T01:49:27,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741838_1014 (size=12509) 2024-12-09T01:49:27,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741838_1014 (size=12509) 2024-12-09T01:49:27,962 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/98bfc6d9d4df41e0ba4a0acd53faba6a 2024-12-09T01:49:28,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/98bfc6d9d4df41e0ba4a0acd53faba6a as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/98bfc6d9d4df41e0ba4a0acd53faba6a 2024-12-09T01:49:28,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/98bfc6d9d4df41e0ba4a0acd53faba6a, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T01:49:28,039 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8c2a461bcd4182ee1839cc12c901c548 in 160ms, sequenceid=11, compaction requested=false 2024-12-09T01:49:28,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8c2a461bcd4182ee1839cc12c901c548: 2024-12-09T01:49:31,689 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T01:49:35,889 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.1733708975889 2024-12-09T01:49:36,098 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK], DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK]] 2024-12-09T01:49:36,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:36,098 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:36,099 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:36,099 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:36,099 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:36,099 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708955780 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708975889 2024-12-09T01:49:36,100 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37463:37463),(127.0.0.1/127.0.0.1:45761:45761)] 2024-12-09T01:49:36,100 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708955780 is not closed yet, will try archiving it next time 2024-12-09T01:49:36,102 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741837_1013 (size=12399) 2024-12-09T01:49:36,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741837_1013 (size=12399) 2024-12-09T01:49:36,304 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:38,509 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:40,714 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:42,918 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:42,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43333 {}] regionserver.HRegion(8855): Flush requested on 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:49:42,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8c2a461bcd4182ee1839cc12c901c548 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T01:49:43,121 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:43,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/14c4affd5c904a0b80643a0154741bbe is 1080, key is row0008/info:/1733708969877/Put/seqid=0 2024-12-09T01:49:43,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741840_1016 (size=12509) 2024-12-09T01:49:43,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741840_1016 (size=12509) 2024-12-09T01:49:43,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at 
sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/14c4affd5c904a0b80643a0154741bbe 2024-12-09T01:49:43,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/14c4affd5c904a0b80643a0154741bbe as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/14c4affd5c904a0b80643a0154741bbe 2024-12-09T01:49:43,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/14c4affd5c904a0b80643a0154741bbe, entries=7, sequenceid=21, filesize=12.2 K 2024-12-09T01:49:43,361 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:43,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8c2a461bcd4182ee1839cc12c901c548 in 442ms, sequenceid=21, compaction requested=false 2024-12-09T01:49:43,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8c2a461bcd4182ee1839cc12c901c548: 2024-12-09T01:49:43,361 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-12-09T01:49:43,361 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:49:43,362 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/98bfc6d9d4df41e0ba4a0acd53faba6a because midkey is the same as first or last row 2024-12-09T01:49:45,123 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:45,537 INFO [master/ef6f18c58dc9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T01:49:45,537 INFO [master/ef6f18c58dc9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-09T01:49:47,327 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:47,329 WARN [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:47,330 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C43333%2C1733708943349:(num 1733708975889) roll requested 2024-12-09T01:49:47,331 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.1733708987331 2024-12-09T01:49:47,540 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 206 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:47,540 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:47,540 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:47,540 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:47,540 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:47,541 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:49:47,541 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708975889 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708987331 2024-12-09T01:49:47,542 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37463:37463),(127.0.0.1/127.0.0.1:45761:45761)] 2024-12-09T01:49:47,542 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708975889 is not closed yet, will try archiving it next time 2024-12-09T01:49:47,542 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708955780 to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs/ef6f18c58dc9%2C43333%2C1733708943349.1733708955780 2024-12-09T01:49:47,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741839_1015 (size=7739) 2024-12-09T01:49:47,544 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741839_1015 (size=7739) 2024-12-09T01:49:49,532 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:51,125 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8c2a461bcd4182ee1839cc12c901c548, had cached 0 bytes from a total of 25018 2024-12-09T01:49:51,738 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:53,943 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:56,147 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:49:58,149 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T01:49:58,149 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.1733708998149 2024-12-09T01:50:01,690 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T01:50:03,163 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5010 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:03,165 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5010 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:03,165 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C43333%2C1733708943349:(num 1733708998149) roll requested 2024-12-09T01:50:03,165 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:03,165 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:03,165 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:03,165 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:03,166 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:03,166 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708987331 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708998149 2024-12-09T01:50:03,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741841_1017 (size=4753) 2024-12-09T01:50:03,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741841_1017 (size=4753) 2024-12-09T01:50:03,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37463:37463),(127.0.0.1/127.0.0.1:45761:45761)] 2024-12-09T01:50:03,172 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708987331 is not closed yet, will try archiving it next time 2024-12-09T01:50:03,173 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.1733709003172 2024-12-09T01:50:08,176 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:08,176 WARN [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], 
DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:08,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43333 {}] regionserver.HRegion(8855): Flush requested on 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:50:08,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8c2a461bcd4182ee1839cc12c901c548 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T01:50:08,181 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:08,181 WARN [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:10,177 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T01:50:13,179 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:13,179 WARN [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:13,179 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:13,179 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:13,179 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:13,179 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:13,180 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:13,180 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708998149 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709003172 2024-12-09T01:50:13,181 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37463:37463),(127.0.0.1/127.0.0.1:45761:45761)] 2024-12-09T01:50:13,181 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708998149 is not closed yet, will try archiving it next time 2024-12-09T01:50:13,181 
DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C43333%2C1733708943349:(num 1733709003172) roll requested 2024-12-09T01:50:13,182 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.1733709013181 2024-12-09T01:50:13,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741842_1018 (size=1569) 2024-12-09T01:50:13,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741842_1018 (size=1569) 2024-12-09T01:50:13,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/7720eea6f0cb4cb9b7cd598475d283a7 is 1080, key is row0015/info:/1733708984921/Put/seqid=0 2024-12-09T01:50:13,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741844_1020 (size=12509) 2024-12-09T01:50:13,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741844_1020 (size=12509) 2024-12-09T01:50:13,193 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/7720eea6f0cb4cb9b7cd598475d283a7 2024-12-09T01:50:13,203 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/7720eea6f0cb4cb9b7cd598475d283a7 as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/7720eea6f0cb4cb9b7cd598475d283a7 2024-12-09T01:50:13,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/7720eea6f0cb4cb9b7cd598475d283a7, entries=7, sequenceid=31, filesize=12.2 K 2024-12-09T01:50:18,189 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5006 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:18,190 WARN [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5006 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:18,214 INFO [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1368): 
Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:18,214 WARN [FSHLog-0-hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1-prefix:ef6f18c58dc9,43333,1733708943349 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44689,DS-f744e06b-dd44-4e49-bd2a-0910fb7861c4,DISK], DatanodeInfoWithStorage[127.0.0.1:41677,DS-8dfb32e1-091e-4156-9ddb-6e02dde91a3e,DISK]] 2024-12-09T01:50:18,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8c2a461bcd4182ee1839cc12c901c548 in 10038ms, sequenceid=31, compaction requested=true 2024-12-09T01:50:18,214 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8c2a461bcd4182ee1839cc12c901c548: 2024-12-09T01:50:18,214 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,214 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,214 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-12-09T01:50:18,215 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:50:18,215 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,215 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/98bfc6d9d4df41e0ba4a0acd53faba6a because midkey is the same as first or last row 2024-12-09T01:50:18,215 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,215 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709003172 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709013181 2024-12-09T01:50:18,216 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37463:37463),(127.0.0.1/127.0.0.1:45761:45761)] 2024-12-09T01:50:18,216 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709003172 is not closed yet, will try archiving it next time 2024-12-09T01:50:18,216 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708975889 to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs/ef6f18c58dc9%2C43333%2C1733708943349.1733708975889 
2024-12-09T01:50:18,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8c2a461bcd4182ee1839cc12c901c548:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:50:18,216 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C43333%2C1733708943349:(num 1733709018216) roll requested 2024-12-09T01:50:18,217 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.1733709018216 2024-12-09T01:50:18,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741843_1019 (size=438) 2024-12-09T01:50:18,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741843_1019 (size=438) 2024-12-09T01:50:18,219 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:50:18,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:50:18,222 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708987331 to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs/ef6f18c58dc9%2C43333%2C1733708943349.1733708987331 2024-12-09T01:50:18,224 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:50:18,224 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733708998149 to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs/ef6f18c58dc9%2C43333%2C1733708943349.1733708998149 2024-12-09T01:50:18,226 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709003172 to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs/ef6f18c58dc9%2C43333%2C1733708943349.1733709003172 2024-12-09T01:50:18,226 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.HStore(1541): 8c2a461bcd4182ee1839cc12c901c548/info is initiating minor compaction (all files) 2024-12-09T01:50:18,226 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,226 INFO [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8c2a461bcd4182ee1839cc12c901c548/info in TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 
2024-12-09T01:50:18,226 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,227 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709013181 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709018216 2024-12-09T01:50:18,227 INFO [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/98bfc6d9d4df41e0ba4a0acd53faba6a, hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/14c4affd5c904a0b80643a0154741bbe, hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/7720eea6f0cb4cb9b7cd598475d283a7] into tmpdir=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp, totalSize=36.6 K 2024-12-09T01:50:18,228 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] compactions.Compactor(225): Compacting 98bfc6d9d4df41e0ba4a0acd53faba6a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733708955841 2024-12-09T01:50:18,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741845_1021 (size=93) 2024-12-09T01:50:18,229 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] compactions.Compactor(225): Compacting 14c4affd5c904a0b80643a0154741bbe, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1733708969877 2024-12-09T01:50:18,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741845_1021 (size=93) 2024-12-09T01:50:18,230 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709013181 to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs/ef6f18c58dc9%2C43333%2C1733708943349.1733709013181 2024-12-09T01:50:18,230 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7720eea6f0cb4cb9b7cd598475d283a7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1733708984921 2024-12-09T01:50:18,244 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45761:45761),(127.0.0.1/127.0.0.1:37463:37463)] 2024-12-09T01:50:18,244 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C43333%2C1733708943349.1733709018244 2024-12-09T01:50:18,260 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,260 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,260 INFO 
[sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,260 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,260 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:18,260 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709018216 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/WALs/ef6f18c58dc9,43333,1733708943349/ef6f18c58dc9%2C43333%2C1733708943349.1733709018244 2024-12-09T01:50:18,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741846_1022 (size=1258) 2024-12-09T01:50:18,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741846_1022 (size=1258) 2024-12-09T01:50:18,267 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45761:45761),(127.0.0.1/127.0.0.1:37463:37463)] 2024-12-09T01:50:18,274 INFO [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8c2a461bcd4182ee1839cc12c901c548#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:50:18,276 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/487f69eefbbe43b6b362c952c1bbc00b is 1080, key is row0001/info:/1733708955841/Put/seqid=0 2024-12-09T01:50:18,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741848_1024 (size=27710) 2024-12-09T01:50:18,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741848_1024 (size=27710) 2024-12-09T01:50:18,294 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/487f69eefbbe43b6b362c952c1bbc00b as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/487f69eefbbe43b6b362c952c1bbc00b 2024-12-09T01:50:18,310 INFO [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8c2a461bcd4182ee1839cc12c901c548/info of 8c2a461bcd4182ee1839cc12c901c548 into 487f69eefbbe43b6b362c952c1bbc00b(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T01:50:18,310 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8c2a461bcd4182ee1839cc12c901c548: 2024-12-09T01:50:18,312 INFO [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548., storeName=8c2a461bcd4182ee1839cc12c901c548/info, priority=13, startTime=1733709018216; duration=0sec 2024-12-09T01:50:18,312 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T01:50:18,312 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:50:18,312 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/487f69eefbbe43b6b362c952c1bbc00b because midkey is the same as first or last row 2024-12-09T01:50:18,312 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T01:50:18,312 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:50:18,313 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/487f69eefbbe43b6b362c952c1bbc00b because midkey is the same as first or last row 2024-12-09T01:50:18,313 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-12-09T01:50:18,313 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:50:18,313 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/487f69eefbbe43b6b362c952c1bbc00b because midkey is the same as first or last row 2024-12-09T01:50:18,313 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:50:18,313 DEBUG [RS:0;ef6f18c58dc9:43333-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8c2a461bcd4182ee1839cc12c901c548:info 2024-12-09T01:50:30,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43333 {}] regionserver.HRegion(8855): Flush requested on 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:50:30,270 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8c2a461bcd4182ee1839cc12c901c548 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T01:50:30,278 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/e745d9eecdc34446a80316c219bea061 is 1080, key is row0022/info:/1733709018246/Put/seqid=0 2024-12-09T01:50:30,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741849_1025 (size=12509) 2024-12-09T01:50:30,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741849_1025 (size=12509) 2024-12-09T01:50:30,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/e745d9eecdc34446a80316c219bea061 2024-12-09T01:50:30,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/e745d9eecdc34446a80316c219bea061 as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/e745d9eecdc34446a80316c219bea061 2024-12-09T01:50:30,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/e745d9eecdc34446a80316c219bea061, entries=7, sequenceid=42, filesize=12.2 K 2024-12-09T01:50:30,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 8c2a461bcd4182ee1839cc12c901c548 in 38ms, sequenceid=42, compaction requested=false 2024-12-09T01:50:30,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8c2a461bcd4182ee1839cc12c901c548: 2024-12-09T01:50:30,308 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-12-09T01:50:30,308 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:50:30,308 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/487f69eefbbe43b6b362c952c1bbc00b because midkey is the same as first or last row 2024-12-09T01:50:31,690 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-09T01:50:36,125 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 8c2a461bcd4182ee1839cc12c901c548, had cached 0 bytes from a total of 40219 2024-12-09T01:50:38,284 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T01:50:38,285 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T01:50:38,285 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-12-09T01:50:38,291 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:38,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:38,292 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T01:50:38,292 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T01:50:38,292 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=888866681, stopped=false 2024-12-09T01:50:38,292 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef6f18c58dc9,36653,1733708942619 2024-12-09T01:50:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:50:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:38,295 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:50:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:50:38,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:38,295 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T01:50:38,296 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:50:38,296 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:38,296 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:50:38,296 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:50:38,297 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,43333,1733708943349' ***** 2024-12-09T01:50:38,297 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:50:38,297 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:50:38,297 INFO [RS:0;ef6f18c58dc9:43333 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:50:38,297 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:50:38,298 INFO [RS:0;ef6f18c58dc9:43333 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:50:38,298 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(3091): Received CLOSE for 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:50:38,299 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,43333,1733708943349 2024-12-09T01:50:38,299 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:50:38,299 INFO [RS:0;ef6f18c58dc9:43333 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef6f18c58dc9:43333. 2024-12-09T01:50:38,299 DEBUG [RS:0;ef6f18c58dc9:43333 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:50:38,299 DEBUG [RS:0;ef6f18c58dc9:43333 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:38,299 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:50:38,299 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
2024-12-09T01:50:38,299 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:50:38,299 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T01:50:38,300 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T01:50:38,300 DEBUG [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8c2a461bcd4182ee1839cc12c901c548=TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.} 2024-12-09T01:50:38,301 DEBUG [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8c2a461bcd4182ee1839cc12c901c548 2024-12-09T01:50:38,301 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8c2a461bcd4182ee1839cc12c901c548, disabling compactions & flushes 2024-12-09T01:50:38,301 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 2024-12-09T01:50:38,301 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 2024-12-09T01:50:38,301 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. after waiting 0 ms 2024-12-09T01:50:38,301 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:50:38,301 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 
2024-12-09T01:50:38,301 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:50:38,301 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:50:38,301 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:50:38,301 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:50:38,301 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8c2a461bcd4182ee1839cc12c901c548 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-12-09T01:50:38,301 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-12-09T01:50:38,310 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/9247bed18b314f9f8d9c0dea869f0d3a is 1080, key is row0029/info:/1733709032272/Put/seqid=0 2024-12-09T01:50:38,329 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/.tmp/info/d4746c0af0a94a7597117ce208d37a2c is 195, key is TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548./info:regioninfo/1733708946172/Put/seqid=0 2024-12-09T01:50:38,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741850_1026 (size=8193) 2024-12-09T01:50:38,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741850_1026 (size=8193) 2024-12-09T01:50:38,333 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/9247bed18b314f9f8d9c0dea869f0d3a 2024-12-09T01:50:38,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741851_1027 (size=7016) 2024-12-09T01:50:38,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741851_1027 (size=7016) 2024-12-09T01:50:38,345 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/.tmp/info/d4746c0af0a94a7597117ce208d37a2c 2024-12-09T01:50:38,349 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/.tmp/info/9247bed18b314f9f8d9c0dea869f0d3a as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/9247bed18b314f9f8d9c0dea869f0d3a 2024-12-09T01:50:38,365 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/9247bed18b314f9f8d9c0dea869f0d3a, entries=3, sequenceid=48, filesize=8.0 K 2024-12-09T01:50:38,371 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 8c2a461bcd4182ee1839cc12c901c548 in 69ms, sequenceid=48, compaction requested=true 2024-12-09T01:50:38,373 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/98bfc6d9d4df41e0ba4a0acd53faba6a, hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/14c4affd5c904a0b80643a0154741bbe, hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/7720eea6f0cb4cb9b7cd598475d283a7] to archive 2024-12-09T01:50:38,378 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-09T01:50:38,384 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/.tmp/ns/0f41e6daf28040f79f8153813944c6ba is 43, key is default/ns:d/1733708945339/Put/seqid=0 2024-12-09T01:50:38,384 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/98bfc6d9d4df41e0ba4a0acd53faba6a to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/98bfc6d9d4df41e0ba4a0acd53faba6a 2024-12-09T01:50:38,387 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/14c4affd5c904a0b80643a0154741bbe to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/14c4affd5c904a0b80643a0154741bbe 2024-12-09T01:50:38,389 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/7720eea6f0cb4cb9b7cd598475d283a7 to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/archive/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/info/7720eea6f0cb4cb9b7cd598475d283a7 2024-12-09T01:50:38,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741852_1028 (size=5153) 2024-12-09T01:50:38,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741852_1028 (size=5153) 2024-12-09T01:50:38,397 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/.tmp/ns/0f41e6daf28040f79f8153813944c6ba 2024-12-09T01:50:38,406 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=ef6f18c58dc9:36653 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-09T01:50:38,411 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [98bfc6d9d4df41e0ba4a0acd53faba6a=12509, 14c4affd5c904a0b80643a0154741bbe=12509, 7720eea6f0cb4cb9b7cd598475d283a7=12509] 2024-12-09T01:50:38,421 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/default/TestLogRolling-testSlowSyncLogRolling/8c2a461bcd4182ee1839cc12c901c548/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-12-09T01:50:38,424 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 2024-12-09T01:50:38,424 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8c2a461bcd4182ee1839cc12c901c548: Waiting for close lock at 1733709038300Running coprocessor pre-close hooks at 1733709038301 (+1 ms)Disabling compacts and flushes for region at 1733709038301Disabling writes for close at 1733709038301Obtaining lock to block concurrent updates at 1733709038301Preparing flush snapshotting stores in 8c2a461bcd4182ee1839cc12c901c548 at 1733709038301Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1733709038302 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. at 1733709038303 (+1 ms)Flushing 8c2a461bcd4182ee1839cc12c901c548/info: creating writer at 1733709038303Flushing 8c2a461bcd4182ee1839cc12c901c548/info: appending metadata at 1733709038309 (+6 ms)Flushing 8c2a461bcd4182ee1839cc12c901c548/info: closing flushed file at 1733709038309Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7428de8c: reopening flushed file at 1733709038345 (+36 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 8c2a461bcd4182ee1839cc12c901c548 in 69ms, sequenceid=48, compaction requested=true at 1733709038371 (+26 ms)Writing region close event to WAL at 1733709038413 (+42 ms)Running coprocessor post-close hooks at 1733709038422 (+9 ms)Closed at 1733709038424 (+2 ms) 2024-12-09T01:50:38,425 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1733708945645.8c2a461bcd4182ee1839cc12c901c548. 
2024-12-09T01:50:38,441 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/.tmp/table/50d3e07837514b3a97c6d684f7385146 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1733708946197/Put/seqid=0 2024-12-09T01:50:38,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741853_1029 (size=5396) 2024-12-09T01:50:38,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741853_1029 (size=5396) 2024-12-09T01:50:38,451 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/.tmp/table/50d3e07837514b3a97c6d684f7385146 2024-12-09T01:50:38,474 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/.tmp/info/d4746c0af0a94a7597117ce208d37a2c as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/info/d4746c0af0a94a7597117ce208d37a2c 2024-12-09T01:50:38,486 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/info/d4746c0af0a94a7597117ce208d37a2c, entries=10, sequenceid=11, filesize=6.9 K 2024-12-09T01:50:38,487 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/.tmp/ns/0f41e6daf28040f79f8153813944c6ba as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/ns/0f41e6daf28040f79f8153813944c6ba 2024-12-09T01:50:38,499 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/ns/0f41e6daf28040f79f8153813944c6ba, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T01:50:38,501 DEBUG [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T01:50:38,501 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/.tmp/table/50d3e07837514b3a97c6d684f7385146 as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/table/50d3e07837514b3a97c6d684f7385146 2024-12-09T01:50:38,514 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/table/50d3e07837514b3a97c6d684f7385146, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T01:50:38,517 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 215ms, sequenceid=11, compaction requested=false 2024-12-09T01:50:38,517 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T01:50:38,517 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T01:50:38,519 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:50:38,544 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T01:50:38,546 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:50:38,546 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:50:38,546 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709038301Running coprocessor pre-close hooks at 1733709038301Disabling compacts and flushes for region at 1733709038301Disabling writes for close at 1733709038301Obtaining lock to block concurrent updates at 1733709038301Preparing flush snapshotting stores in 1588230740 at 1733709038301Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1733709038302 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733709038303 (+1 ms)Flushing 1588230740/info: creating writer at 1733709038303Flushing 1588230740/info: appending metadata at 1733709038329 (+26 ms)Flushing 1588230740/info: closing flushed file at 1733709038329Flushing 1588230740/ns: creating writer at 1733709038360 (+31 ms)Flushing 1588230740/ns: appending metadata at 1733709038383 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1733709038383Flushing 1588230740/table: creating writer at 1733709038414 (+31 ms)Flushing 1588230740/table: appending metadata at 1733709038440 (+26 ms)Flushing 1588230740/table: closing flushed file at 1733709038440Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5e1e4953: reopening flushed file at 1733709038472 (+32 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f52e039: reopening flushed file at 1733709038486 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a90d6b7: reopening flushed file at 1733709038499 (+13 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 215ms, sequenceid=11, compaction requested=false at 1733709038517 (+18 ms)Writing region close event to WAL at 1733709038529 (+12 ms)Running coprocessor post-close hooks at 
1733709038546 (+17 ms)Closed at 1733709038546
2024-12-09T01:50:38,547 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-09T01:50:38,701 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,43333,1733708943349; all regions closed.
2024-12-09T01:50:38,703 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,703 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,703 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,704 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,704 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741834_1010 (size=3066)
2024-12-09T01:50:38,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741834_1010 (size=3066)
2024-12-09T01:50:38,715 DEBUG [RS:0;ef6f18c58dc9:43333 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs
2024-12-09T01:50:38,715 INFO [RS:0;ef6f18c58dc9:43333 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C43333%2C1733708943349.meta:.meta(num 1733708945113)
2024-12-09T01:50:38,716 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,716 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,716 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,717 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,717 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:38,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741847_1023 (size=12695)
2024-12-09T01:50:38,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741847_1023 (size=12695)
2024-12-09T01:50:38,726 DEBUG [RS:0;ef6f18c58dc9:43333 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/oldWALs
2024-12-09T01:50:38,727 INFO [RS:0;ef6f18c58dc9:43333 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C43333%2C1733708943349:(num 1733709018244)
2024-12-09T01:50:38,727 DEBUG [RS:0;ef6f18c58dc9:43333 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T01:50:38,727 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.LeaseManager(133): Closed leases
2024-12-09T01:50:38,727 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T01:50:38,727 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-09T01:50:38,727 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-09T01:50:38,728 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T01:50:38,728 INFO [RS:0;ef6f18c58dc9:43333 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43333
2024-12-09T01:50:38,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-09T01:50:38,734 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T01:50:38,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,43333,1733708943349
2024-12-09T01:50:38,738 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,43333,1733708943349]
2024-12-09T01:50:38,740 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,43333,1733708943349 already deleted, retry=false
2024-12-09T01:50:38,740 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,43333,1733708943349 expired; onlineServers=0
2024-12-09T01:50:38,740 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef6f18c58dc9,36653,1733708942619' *****
2024-12-09T01:50:38,740 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-09T01:50:38,740 INFO [M:0;ef6f18c58dc9:36653 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-09T01:50:38,740 INFO [M:0;ef6f18c58dc9:36653 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-09T01:50:38,740 DEBUG [M:0;ef6f18c58dc9:36653 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-09T01:50:38,741 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-09T01:50:38,741 DEBUG [M:0;ef6f18c58dc9:36653 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T01:50:38,741 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733708944354 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733708944354,5,FailOnTimeoutGroup] 2024-12-09T01:50:38,741 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733708944353 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733708944353,5,FailOnTimeoutGroup] 2024-12-09T01:50:38,741 INFO [M:0;ef6f18c58dc9:36653 {}] hbase.ChoreService(370): Chore service for: master/ef6f18c58dc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T01:50:38,741 INFO [M:0;ef6f18c58dc9:36653 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:50:38,741 DEBUG [M:0;ef6f18c58dc9:36653 {}] master.HMaster(1795): Stopping service threads 2024-12-09T01:50:38,741 INFO [M:0;ef6f18c58dc9:36653 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T01:50:38,741 INFO [M:0;ef6f18c58dc9:36653 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:50:38,742 INFO [M:0;ef6f18c58dc9:36653 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T01:50:38,742 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T01:50:38,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T01:50:38,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:38,745 DEBUG [M:0;ef6f18c58dc9:36653 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-12-09T01:50:38,746 DEBUG [M:0;ef6f18c58dc9:36653 {}] master.ActiveMasterManager(353): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-12-09T01:50:38,746 INFO [M:0;ef6f18c58dc9:36653 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/.lastflushedseqids 2024-12-09T01:50:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741854_1030 (size=130) 2024-12-09T01:50:38,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741854_1030 (size=130) 2024-12-09T01:50:38,766 INFO [M:0;ef6f18c58dc9:36653 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T01:50:38,766 INFO [M:0;ef6f18c58dc9:36653 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T01:50:38,766 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
2024-12-09T01:50:38,766 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:38,766 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:38,766 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:50:38,766 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:38,767 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.01 KB heapSize=29.18 KB 2024-12-09T01:50:38,794 DEBUG [M:0;ef6f18c58dc9:36653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5cd8afc0bbb647b6bd1d50d6d486e39c is 82, key is hbase:meta,,1/info:regioninfo/1733708945233/Put/seqid=0 2024-12-09T01:50:38,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741855_1031 (size=5672) 2024-12-09T01:50:38,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741855_1031 (size=5672) 2024-12-09T01:50:38,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:50:38,839 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43333-0x1007476c2af0001, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:50:38,839 INFO [RS:0;ef6f18c58dc9:43333 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:50:38,839 INFO [RS:0;ef6f18c58dc9:43333 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,43333,1733708943349; zookeeper connection closed. 
2024-12-09T01:50:38,840 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4dca2d42 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4dca2d42 2024-12-09T01:50:38,841 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T01:50:39,210 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5cd8afc0bbb647b6bd1d50d6d486e39c 2024-12-09T01:50:39,238 DEBUG [M:0;ef6f18c58dc9:36653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/22831e27087444f2b1f5d954b4ec7512 is 765, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733708946209/Put/seqid=0 2024-12-09T01:50:39,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741856_1032 (size=6246) 2024-12-09T01:50:39,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741856_1032 (size=6246) 2024-12-09T01:50:39,258 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.41 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/22831e27087444f2b1f5d954b4ec7512 2024-12-09T01:50:39,267 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 22831e27087444f2b1f5d954b4ec7512 2024-12-09T01:50:39,288 DEBUG [M:0;ef6f18c58dc9:36653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/80b6350c218c4405a6476e01c2ebcf24 is 69, key is ef6f18c58dc9,43333,1733708943349/rs:state/1733708944416/Put/seqid=0 2024-12-09T01:50:39,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741857_1033 (size=5156) 2024-12-09T01:50:39,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741857_1033 (size=5156) 2024-12-09T01:50:39,302 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/80b6350c218c4405a6476e01c2ebcf24 2024-12-09T01:50:39,340 DEBUG [M:0;ef6f18c58dc9:36653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/20d47390d18342318f2e4289e55af866 is 52, key is load_balancer_on/state:d/1733708945614/Put/seqid=0 2024-12-09T01:50:39,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44689 is added to blk_1073741858_1034 (size=5056) 2024-12-09T01:50:39,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741858_1034 (size=5056) 2024-12-09T01:50:39,356 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/20d47390d18342318f2e4289e55af866 2024-12-09T01:50:39,368 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5cd8afc0bbb647b6bd1d50d6d486e39c as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5cd8afc0bbb647b6bd1d50d6d486e39c 2024-12-09T01:50:39,376 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5cd8afc0bbb647b6bd1d50d6d486e39c, entries=8, sequenceid=59, filesize=5.5 K 2024-12-09T01:50:39,377 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/22831e27087444f2b1f5d954b4ec7512 as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/22831e27087444f2b1f5d954b4ec7512 2024-12-09T01:50:39,386 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 22831e27087444f2b1f5d954b4ec7512 2024-12-09T01:50:39,386 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/22831e27087444f2b1f5d954b4ec7512, entries=6, sequenceid=59, filesize=6.1 K 2024-12-09T01:50:39,388 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/80b6350c218c4405a6476e01c2ebcf24 as hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/80b6350c218c4405a6476e01c2ebcf24 2024-12-09T01:50:39,396 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/80b6350c218c4405a6476e01c2ebcf24, entries=1, sequenceid=59, filesize=5.0 K 2024-12-09T01:50:39,397 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/20d47390d18342318f2e4289e55af866 as 
hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/20d47390d18342318f2e4289e55af866 2024-12-09T01:50:39,405 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/20d47390d18342318f2e4289e55af866, entries=1, sequenceid=59, filesize=4.9 K 2024-12-09T01:50:39,406 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 640ms, sequenceid=59, compaction requested=false 2024-12-09T01:50:39,425 INFO [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:39,425 DEBUG [M:0;ef6f18c58dc9:36653 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709038766Disabling compacts and flushes for region at 1733709038766Disabling writes for close at 1733709038766Obtaining lock to block concurrent updates at 1733709038767 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733709038767Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23564, getHeapSize=29816, getOffHeapSize=0, getCellsCount=70 at 1733709038767Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733709038769 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733709038769Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733709038793 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733709038793Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733709039219 (+426 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733709039237 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733709039237Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733709039267 (+30 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733709039288 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733709039288Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733709039311 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733709039339 (+28 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733709039339Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@137a71e1: reopening flushed file at 1733709039366 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61795b8c: reopening flushed file at 1733709039376 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@60bd577c: reopening flushed file at 1733709039387 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2aecde05: reopening flushed file at 1733709039396 (+9 ms)Finished flush of dataSize ~23.01 KB/23564, heapSize ~29.12 KB/29816, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 640ms, sequenceid=59, compaction requested=false at 1733709039406 (+10 ms)Writing region close event to WAL at 1733709039425 (+19 ms)Closed at 1733709039425 2024-12-09T01:50:39,426 INFO [sync.0 {}] 
wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:39,427 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:39,427 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:39,427 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:39,427 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:50:39,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44689 is added to blk_1073741830_1006 (size=27961)
2024-12-09T01:50:39,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41677 is added to blk_1073741830_1006 (size=27961)
2024-12-09T01:50:39,432 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T01:50:39,432 INFO [M:0;ef6f18c58dc9:36653 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-09T01:50:39,433 INFO [M:0;ef6f18c58dc9:36653 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36653
2024-12-09T01:50:39,433 INFO [M:0;ef6f18c58dc9:36653 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-09T01:50:39,536 INFO [M:0;ef6f18c58dc9:36653 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T01:50:39,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T01:50:39,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36653-0x1007476c2af0000, quorum=127.0.0.1:54870, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T01:50:39,541 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T01:50:39,544 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T01:50:39,544 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T01:50:39,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T01:50:39,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.log.dir/,STOPPED}
2024-12-09T01:50:39,550 WARN [BP-29672646-172.17.0.2-1733708939553 heartbeating to localhost/127.0.0.1:36525 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T01:50:39,550 WARN [BP-29672646-172.17.0.2-1733708939553 heartbeating to localhost/127.0.0.1:36525 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-29672646-172.17.0.2-1733708939553 (Datanode Uuid 4e0fe31b-1c5e-4376-96c7-f63f7fc8806c) service to localhost/127.0.0.1:36525
2024-12-09T01:50:39,552 WARN
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/data/data3/current/BP-29672646-172.17.0.2-1733708939553 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:39,552 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/data/data4/current/BP-29672646-172.17.0.2-1733708939553 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:39,552 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T01:50:39,552 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:50:39,553 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:50:39,556 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:39,557 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:50:39,557 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:50:39,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:50:39,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.log.dir/,STOPPED} 2024-12-09T01:50:39,560 WARN [BP-29672646-172.17.0.2-1733708939553 heartbeating to localhost/127.0.0.1:36525 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:50:39,560 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:50:39,560 WARN [BP-29672646-172.17.0.2-1733708939553 heartbeating to localhost/127.0.0.1:36525 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-29672646-172.17.0.2-1733708939553 (Datanode Uuid 78149fd4-bedc-4bb1-be68-887f779a4dde) service to localhost/127.0.0.1:36525 2024-12-09T01:50:39,560 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:50:39,561 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/data/data1/current/BP-29672646-172.17.0.2-1733708939553 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:39,561 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/cluster_ea14b0e5-c9f4-0c90-de5e-70a9af0a8331/data/data2/current/BP-29672646-172.17.0.2-1733708939553 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:39,562 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:50:39,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:50:39,578 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:50:39,578 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:50:39,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:50:39,578 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.log.dir/,STOPPED} 2024-12-09T01:50:39,589 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T01:50:39,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T01:50:39,645 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=76 (was 12) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36525 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/ef6f18c58dc9:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@efc635d java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36525 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36525 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36525 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36525 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: master/ef6f18c58dc9:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36525 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: master/ef6f18c58dc9:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36525 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:36525 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=267 (was 393), ProcessCount=11 (was 11), AvailableMemoryMB=12225 (was 12331) 2024-12-09T01:50:39,654 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=77, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=267, ProcessCount=11, AvailableMemoryMB=12224 2024-12-09T01:50:39,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T01:50:39,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.log.dir so I do NOT create it in target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0 2024-12-09T01:50:39,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fedf1c79-7863-ebac-2aa0-33c675ed0552/hadoop.tmp.dir so I do NOT create it in target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0 2024-12-09T01:50:39,655 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834, deleteOnExit=true 2024-12-09T01:50:39,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T01:50:39,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/test.cache.data in system properties and HBase conf 2024-12-09T01:50:39,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T01:50:39,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.log.dir in system properties and HBase conf 2024-12-09T01:50:39,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T01:50:39,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T01:50:39,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T01:50:39,655 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T01:50:39,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:50:39,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:50:39,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T01:50:39,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:50:39,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T01:50:39,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T01:50:39,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:50:39,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:50:39,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T01:50:39,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/nfs.dump.dir in system properties and HBase conf 2024-12-09T01:50:39,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/java.io.tmpdir in system properties and HBase conf 2024-12-09T01:50:39,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:50:39,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T01:50:39,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T01:50:39,677 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:50:39,764 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:39,776 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:50:39,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:50:39,777 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:50:39,778 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:50:39,779 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:39,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46d26a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:50:39,780 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59505eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:50:39,954 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1edca743{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/java.io.tmpdir/jetty-localhost-36869-hadoop-hdfs-3_4_1-tests_jar-_-any-5845971205576202386/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:50:39,955 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c443180{HTTP/1.1, (http/1.1)}{localhost:36869} 2024-12-09T01:50:39,955 INFO [Time-limited test {}] server.Server(415): Started @102693ms 2024-12-09T01:50:39,978 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:50:40,112 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:40,121 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:50:40,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:50:40,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:50:40,124 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:50:40,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48bb784e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:50:40,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69a0f3c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:50:40,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@641eaf99{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/java.io.tmpdir/jetty-localhost-37879-hadoop-hdfs-3_4_1-tests_jar-_-any-415518358403500227/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:40,245 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@276f8783{HTTP/1.1, (http/1.1)}{localhost:37879} 2024-12-09T01:50:40,245 INFO [Time-limited test {}] server.Server(415): Started @102982ms 2024-12-09T01:50:40,246 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:50:40,289 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:40,293 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:50:40,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:50:40,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:50:40,294 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:50:40,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f11c100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:50:40,296 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@194f043a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:50:40,351 WARN [Thread-438 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/data/data1/current/BP-250701132-172.17.0.2-1733709039698/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:40,351 WARN [Thread-439 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/data/data2/current/BP-250701132-172.17.0.2-1733709039698/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:40,377 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:50:40,381 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15ee483fa456377b with lease ID 0xbc59f09dd59acb4e: Processing first storage report for DS-cd842ee2-0cb5-4675-abbc-09470dd54532 from datanode DatanodeRegistration(127.0.0.1:41297, datanodeUuid=126cd986-b6f7-4136-90c3-b882c49e78f2, infoPort=46473, infoSecurePort=0, ipcPort=34577, storageInfo=lv=-57;cid=testClusterID;nsid=418294223;c=1733709039698) 2024-12-09T01:50:40,381 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15ee483fa456377b with lease ID 0xbc59f09dd59acb4e: from storage DS-cd842ee2-0cb5-4675-abbc-09470dd54532 node DatanodeRegistration(127.0.0.1:41297, datanodeUuid=126cd986-b6f7-4136-90c3-b882c49e78f2, infoPort=46473, infoSecurePort=0, ipcPort=34577, storageInfo=lv=-57;cid=testClusterID;nsid=418294223;c=1733709039698), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:40,381 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x15ee483fa456377b with lease ID 0xbc59f09dd59acb4e: Processing first storage report for DS-a52e5288-8a78-4341-80f5-937f7013221e from datanode DatanodeRegistration(127.0.0.1:41297, datanodeUuid=126cd986-b6f7-4136-90c3-b882c49e78f2, infoPort=46473, infoSecurePort=0, ipcPort=34577, storageInfo=lv=-57;cid=testClusterID;nsid=418294223;c=1733709039698) 2024-12-09T01:50:40,381 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x15ee483fa456377b with lease ID 0xbc59f09dd59acb4e: from storage DS-a52e5288-8a78-4341-80f5-937f7013221e node DatanodeRegistration(127.0.0.1:41297, datanodeUuid=126cd986-b6f7-4136-90c3-b882c49e78f2, infoPort=46473, infoSecurePort=0, ipcPort=34577, storageInfo=lv=-57;cid=testClusterID;nsid=418294223;c=1733709039698), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:40,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b95e72c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/java.io.tmpdir/jetty-localhost-46457-hadoop-hdfs-3_4_1-tests_jar-_-any-17081878531339781531/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:40,421 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@376d199b{HTTP/1.1, (http/1.1)}{localhost:46457} 2024-12-09T01:50:40,421 INFO [Time-limited test {}] server.Server(415): Started @103158ms 2024-12-09T01:50:40,423 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
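The two DataNodes whose storage reports and Jetty endpoints appear above come from an in-process HDFS mini cluster. A minimal sketch of spinning one up with Hadoop's public test API follows; the paths and the throttle value are illustrative, not taken from this run:

    // Sketch: a two-DataNode MiniDFSCluster like the one producing the block reports above.
    // Assumes the hadoop-hdfs test jar is on the classpath.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniDfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Values of 1000 or less avoid the DirectoryScanner throttle warning logged above.
        conf.setInt("dfs.datanode.directoryscan.throttle.limit.ms.per.sec", 1000);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)   // two DataNodes, matching the two storage reports in the log
            .build();
        try {
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();
          fs.mkdirs(new Path("/user/jenkins/test-data"));  // placeholder path
        } finally {
          cluster.shutdown();
        }
      }
    }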
2024-12-09T01:50:40,532 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/data/data3/current/BP-250701132-172.17.0.2-1733709039698/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:40,533 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/data/data4/current/BP-250701132-172.17.0.2-1733709039698/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:40,552 WARN [Thread-453 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T01:50:40,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1a2f65914ae914e with lease ID 0xbc59f09dd59acb4f: Processing first storage report for DS-62e13821-d181-4e6d-96d5-4a11caa5a323 from datanode DatanodeRegistration(127.0.0.1:34405, datanodeUuid=c6676e61-aec8-42d0-bd92-d1534fefe8fe, infoPort=34427, infoSecurePort=0, ipcPort=38889, storageInfo=lv=-57;cid=testClusterID;nsid=418294223;c=1733709039698) 2024-12-09T01:50:40,555 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1a2f65914ae914e with lease ID 0xbc59f09dd59acb4f: from storage DS-62e13821-d181-4e6d-96d5-4a11caa5a323 node DatanodeRegistration(127.0.0.1:34405, datanodeUuid=c6676e61-aec8-42d0-bd92-d1534fefe8fe, infoPort=34427, infoSecurePort=0, ipcPort=38889, storageInfo=lv=-57;cid=testClusterID;nsid=418294223;c=1733709039698), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:40,555 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd1a2f65914ae914e with lease ID 0xbc59f09dd59acb4f: Processing first storage report for DS-e04d5072-cf79-4f2b-a288-420d592f18ba from datanode DatanodeRegistration(127.0.0.1:34405, datanodeUuid=c6676e61-aec8-42d0-bd92-d1534fefe8fe, infoPort=34427, infoSecurePort=0, ipcPort=38889, storageInfo=lv=-57;cid=testClusterID;nsid=418294223;c=1733709039698) 2024-12-09T01:50:40,555 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd1a2f65914ae914e with lease ID 0xbc59f09dd59acb4f: from storage DS-e04d5072-cf79-4f2b-a288-420d592f18ba node DatanodeRegistration(127.0.0.1:34405, datanodeUuid=c6676e61-aec8-42d0-bd92-d1534fefe8fe, infoPort=34427, infoSecurePort=0, ipcPort=38889, storageInfo=lv=-57;cid=testClusterID;nsid=418294223;c=1733709039698), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:40,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0 2024-12-09T01:50:40,656 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/zookeeper_0, clientPort=64800, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T01:50:40,657 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64800 2024-12-09T01:50:40,658 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:40,659 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:40,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:50:40,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:50:40,672 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b with version=8 2024-12-09T01:50:40,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/hbase-staging 2024-12-09T01:50:40,675 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:50:40,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:40,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:40,675 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:50:40,675 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:40,676 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:50:40,676 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T01:50:40,676 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:50:40,677 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34721 2024-12-09T01:50:40,678 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34721 connecting to ZooKeeper ensemble=127.0.0.1:64800 2024-12-09T01:50:40,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:347210x0, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:50:40,685 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34721-0x100747844f60000 connected 2024-12-09T01:50:40,701 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:40,703 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:40,706 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:50:40,706 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b, hbase.cluster.distributed=false 2024-12-09T01:50:40,708 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:50:40,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34721 2024-12-09T01:50:40,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34721 2024-12-09T01:50:40,710 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34721 2024-12-09T01:50:40,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34721 2024-12-09T01:50:40,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34721 2024-12-09T01:50:40,728 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:50:40,728 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:40,728 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:40,729 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:50:40,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:40,729 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:50:40,729 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:50:40,729 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:50:40,730 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33525 2024-12-09T01:50:40,731 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33525 connecting to ZooKeeper ensemble=127.0.0.1:64800 2024-12-09T01:50:40,732 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:40,734 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:40,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:335250x0, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:50:40,741 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:335250x0, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:50:40,741 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33525-0x100747844f60001 connected 2024-12-09T01:50:40,741 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:50:40,744 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:50:40,744 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:50:40,746 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:50:40,746 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33525 2024-12-09T01:50:40,747 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33525 2024-12-09T01:50:40,752 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33525 2024-12-09T01:50:40,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33525 2024-12-09T01:50:40,756 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33525 2024-12-09T01:50:40,776 
DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef6f18c58dc9:34721 2024-12-09T01:50:40,777 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef6f18c58dc9,34721,1733709040675 2024-12-09T01:50:40,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:50:40,779 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:50:40,780 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef6f18c58dc9,34721,1733709040675 2024-12-09T01:50:40,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:50:40,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,783 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:50:40,784 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef6f18c58dc9,34721,1733709040675 from backup master directory 2024-12-09T01:50:40,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef6f18c58dc9,34721,1733709040675 2024-12-09T01:50:40,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:50:40,786 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:50:40,786 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
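The ZKWatcher/ZKUtil lines above show watches being registered on znodes such as /hbase/running and /hbase/master before those nodes exist. A rough equivalent with the plain ZooKeeper client, assuming the quorum 127.0.0.1:64800 from the log; this is an illustration, not HBase's internal ZKUtil:

    // Sketch: watching a znode that may not exist yet, analogous to
    // "Set watcher on znode that does not yet exist" in the log above.
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:64800", 30000, (WatchedEvent e) -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          // NodeCreated / NodeDeleted / NodeChildrenChanged events arrive here,
          // mirroring the ZKWatcher(609) lines in the log.
          System.out.println("event type=" + e.getType() + " path=" + e.getPath());
        });
        connected.await();
        // exists() with watch=true registers the watcher even when the znode is absent.
        zk.exists("/hbase/master", true);
        Thread.sleep(5_000);  // keep the session open long enough to observe events
        zk.close();
      }
    }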
2024-12-09T01:50:40,786 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef6f18c58dc9,34721,1733709040675 2024-12-09T01:50:40,793 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/hbase.id] with ID: 9cb6e7ef-609a-427b-98a1-9e99301a6126 2024-12-09T01:50:40,793 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/.tmp/hbase.id 2024-12-09T01:50:40,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:50:40,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:50:40,804 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/.tmp/hbase.id]:[hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/hbase.id] 2024-12-09T01:50:40,822 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:40,823 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T01:50:40,825 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
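The FSUtils lines above describe writing the cluster ID to a temporary location and then moving it to its target path. A small sketch of that create-then-rename pattern using only the public Hadoop FileSystem API; the paths and the UUID below are placeholders, not the values from this run:

    // Sketch of the write-to-.tmp-then-rename pattern used for the hbase.id file above.
    import java.nio.charset.StandardCharsets;
    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ClusterIdFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:38277");  // NameNode address from the log
        FileSystem fs = FileSystem.get(conf);
        Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
        Path dst = new Path("/user/jenkins/test-data/hbase.id");
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
        }
        // Renaming from a temporary location makes the final file appear in one step.
        if (!fs.rename(tmp, dst)) {
          throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }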
2024-12-09T01:50:40,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:50:40,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:50:40,836 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:50:40,838 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T01:50:40,838 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:50:40,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:50:40,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:50:40,848 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store 2024-12-09T01:50:40,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:50:40,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:50:40,857 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:40,857 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:50:40,857 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:40,857 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:40,857 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:50:40,858 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:40,858 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
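The 'master:store' descriptor logged above lists per-family attributes (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, BLOCKSIZE, and so on). For reference, a comparable descriptor can be assembled with the public HBase client builders; the table name "demo:store" below is invented for illustration, and user code does not actually create the internal master region this way:

    // Sketch: a column family with the same attributes the 'info' family is logged with
    // (VERSIONS=3, IN_MEMORY, ROWCOL bloom, ROW_INDEX_V1 encoding, 8 KB blocks).
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBlocksize(8 * 1024)
            .build();
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))  // defaults: VERSIONS=1, ROW bloom, 64 KB blocks
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo:store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
      }
    }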
2024-12-09T01:50:40,858 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709040857Disabling compacts and flushes for region at 1733709040857Disabling writes for close at 1733709040858 (+1 ms)Writing region close event to WAL at 1733709040858Closed at 1733709040858 2024-12-09T01:50:40,860 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/.initializing 2024-12-09T01:50:40,860 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/WALs/ef6f18c58dc9,34721,1733709040675 2024-12-09T01:50:40,864 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C34721%2C1733709040675, suffix=, logDir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/WALs/ef6f18c58dc9,34721,1733709040675, archiveDir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/oldWALs, maxLogs=10 2024-12-09T01:50:40,865 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C34721%2C1733709040675.1733709040864 2024-12-09T01:50:40,871 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/WALs/ef6f18c58dc9,34721,1733709040675/ef6f18c58dc9%2C34721%2C1733709040675.1733709040864 2024-12-09T01:50:40,874 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34427:34427),(127.0.0.1/127.0.0.1:46473:46473)] 2024-12-09T01:50:40,878 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:50:40,878 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:40,878 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,879 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,880 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T01:50:40,883 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:40,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:40,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T01:50:40,885 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:40,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:50:40,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,889 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T01:50:40,889 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:40,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:50:40,890 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T01:50:40,892 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:40,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:50:40,893 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,894 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,895 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,896 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,896 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,897 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T01:50:40,898 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:40,901 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:50:40,902 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=841687, jitterRate=0.07026153802871704}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:50:40,903 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733709040879Initializing all the Stores at 1733709040880 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709040880Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709040880Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709040880Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709040880Cleaning up temporary data from old regions at 1733709040896 (+16 ms)Region opened successfully at 1733709040903 (+7 ms) 2024-12-09T01:50:40,903 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T01:50:40,908 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@653cb19e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:50:40,909 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T01:50:40,910 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T01:50:40,910 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T01:50:40,910 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T01:50:40,911 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T01:50:40,911 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T01:50:40,911 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T01:50:40,915 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T01:50:40,916 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T01:50:40,917 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T01:50:40,918 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T01:50:40,919 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T01:50:40,921 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T01:50:40,921 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T01:50:40,923 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T01:50:40,924 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T01:50:40,925 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T01:50:40,926 DEBUG 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T01:50:40,929 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T01:50:40,930 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T01:50:40,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:50:40,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:50:40,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,932 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,933 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef6f18c58dc9,34721,1733709040675, sessionid=0x100747844f60000, setting cluster-up flag (Was=false) 2024-12-09T01:50:40,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,942 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T01:50:40,943 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,34721,1733709040675 2024-12-09T01:50:40,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:40,953 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T01:50:40,954 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,34721,1733709040675 2024-12-09T01:50:40,955 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T01:50:40,957 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T01:50:40,958 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T01:50:40,958 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T01:50:40,958 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef6f18c58dc9,34721,1733709040675 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T01:50:40,958 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(746): ClusterId : 9cb6e7ef-609a-427b-98a1-9e99301a6126 2024-12-09T01:50:40,958 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:50:40,960 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:50:40,961 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:50:40,961 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:50:40,961 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:50:40,961 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:50:40,961 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:50:40,961 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef6f18c58dc9:0, corePoolSize=10, maxPoolSize=10 2024-12-09T01:50:40,961 DEBUG 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:40,961 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:50:40,961 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:40,962 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733709070962 2024-12-09T01:50:40,962 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T01:50:40,962 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T01:50:40,962 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T01:50:40,962 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T01:50:40,963 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T01:50:40,963 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T01:50:40,963 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
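The ChoreService lines above register periodic chores such as LogsCleaner with period=600000 ms. As a loose analogy only (not the HBase implementation), a fixed-rate task in plain Java looks like this:

    // Analogy: "run a named task every 10 minutes", mirroring the chore periods logged above.
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreSketch {
      public static void main(String[] args) {
        ScheduledExecutorService chorePool = Executors.newScheduledThreadPool(1);
        Runnable logsCleaner = () -> {
          // A real log-cleaner chore scans old WAL files and deletes those that every
          // configured cleaner delegate approves for deletion.
          System.out.println("LogsCleaner tick");
        };
        chorePool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
      }
    }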
2024-12-09T01:50:40,963 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T01:50:40,963 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T01:50:40,963 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T01:50:40,963 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:50:40,963 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T01:50:40,964 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T01:50:40,964 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T01:50:40,964 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709040964,5,FailOnTimeoutGroup] 2024-12-09T01:50:40,964 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709040964,5,FailOnTimeoutGroup] 2024-12-09T01:50:40,964 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:40,964 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T01:50:40,964 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:40,964 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
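The HMaster line above notes that reopening regions with very high storeFileRefCount is disabled unless hbase.regions.recovery.store.file.ref.count is set above 0. A minimal configuration sketch; the key name is quoted from that log line, while the threshold of 256 is an arbitrary example value:

    // Sketch: enabling the store-file-refcount recovery feature mentioned in the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RefCountRecoverySketch {
      public static Configuration configure() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);  // example threshold
        return conf;
      }
    }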
2024-12-09T01:50:40,965 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:40,965 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T01:50:40,965 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:50:40,966 DEBUG [RS:0;ef6f18c58dc9:33525 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cc9d309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:50:40,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:50:40,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:50:40,976 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T01:50:40,976 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b 2024-12-09T01:50:40,980 DEBUG [RS:0;ef6f18c58dc9:33525 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef6f18c58dc9:33525 2024-12-09T01:50:40,980 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:50:40,980 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:50:40,980 DEBUG [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T01:50:40,981 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,34721,1733709040675 with port=33525, startcode=1733709040728 2024-12-09T01:50:40,981 DEBUG [RS:0;ef6f18c58dc9:33525 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:50:40,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:50:40,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:50:40,985 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:40,985 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46221, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:50:40,986 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34721 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:40,986 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34721 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:40,986 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:50:40,988 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:50:40,988 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:40,989 DEBUG [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b 2024-12-09T01:50:40,989 DEBUG [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38277 2024-12-09T01:50:40,989 DEBUG [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:50:40,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:40,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:50:40,991 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:50:40,991 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:40,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:40,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:50:40,993 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:50:40,993 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:40,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:40,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:50:40,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:50:40,995 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:50:40,996 DEBUG [RS:0;ef6f18c58dc9:33525 {}] zookeeper.ZKUtil(111): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:40,996 WARN [RS:0;ef6f18c58dc9:33525 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T01:50:40,996 INFO [RS:0;ef6f18c58dc9:33525 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:50:40,996 DEBUG [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/WALs/ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:40,996 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:40,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:40,997 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,33525,1733709040728] 2024-12-09T01:50:40,997 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:50:40,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740 2024-12-09T01:50:40,999 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740 2024-12-09T01:50:41,000 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:50:41,001 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:50:41,001 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T01:50:41,001 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:50:41,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:50:41,006 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:50:41,006 INFO [RS:0;ef6f18c58dc9:33525 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:50:41,006 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:50:41,007 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T01:50:41,007 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:50:41,007 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879005, jitterRate=0.11771342158317566}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:50:41,008 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:50:41,008 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,008 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,008 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733709040985Initializing all the Stores at 1733709040986 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709040986Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709040986Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709040986Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709040986Cleaning up temporary data from old regions at 1733709041001 (+15 ms)Region opened successfully at 1733709041008 (+7 ms) 2024-12-09T01:50:41,008 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] 
executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:50:41,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:41,009 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709041008Disabling compacts and flushes for region at 1733709041008Disabling writes for close at 1733709041009 (+1 ms)Writing region close event to WAL at 1733709041009Closed at 1733709041009 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:50:41,009 DEBUG [RS:0;ef6f18c58dc9:33525 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:50:41,010 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,010 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,010 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T01:50:41,010 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,010 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,010 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33525,1733709040728-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:50:41,011 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:50:41,011 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T01:50:41,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T01:50:41,013 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:50:41,016 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T01:50:41,028 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:50:41,028 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33525,1733709040728-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,029 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,029 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.Replication(171): ef6f18c58dc9,33525,1733709040728 started 2024-12-09T01:50:41,044 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T01:50:41,044 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,33525,1733709040728, RpcServer on ef6f18c58dc9/172.17.0.2:33525, sessionid=0x100747844f60001 2024-12-09T01:50:41,044 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:50:41,044 DEBUG [RS:0;ef6f18c58dc9:33525 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:41,044 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,33525,1733709040728' 2024-12-09T01:50:41,045 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:50:41,045 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:50:41,046 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:50:41,046 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:50:41,046 DEBUG [RS:0;ef6f18c58dc9:33525 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:41,046 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,33525,1733709040728' 2024-12-09T01:50:41,046 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:50:41,047 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:50:41,047 DEBUG [RS:0;ef6f18c58dc9:33525 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:50:41,047 INFO [RS:0;ef6f18c58dc9:33525 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:50:41,047 INFO [RS:0;ef6f18c58dc9:33525 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-09T01:50:41,150 INFO [RS:0;ef6f18c58dc9:33525 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C33525%2C1733709040728, suffix=, logDir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/WALs/ef6f18c58dc9,33525,1733709040728, archiveDir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/oldWALs, maxLogs=32 2024-12-09T01:50:41,152 INFO [RS:0;ef6f18c58dc9:33525 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C33525%2C1733709040728.1733709041151 2024-12-09T01:50:41,159 INFO [RS:0;ef6f18c58dc9:33525 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/WALs/ef6f18c58dc9,33525,1733709040728/ef6f18c58dc9%2C33525%2C1733709040728.1733709041151 2024-12-09T01:50:41,160 DEBUG [RS:0;ef6f18c58dc9:33525 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46473:46473),(127.0.0.1/127.0.0.1:34427:34427)] 2024-12-09T01:50:41,166 DEBUG [ef6f18c58dc9:34721 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T01:50:41,166 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:41,168 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,33525,1733709040728, state=OPENING 2024-12-09T01:50:41,170 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T01:50:41,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:41,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:41,172 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:50:41,172 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:50:41,172 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:50:41,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,33525,1733709040728}] 2024-12-09T01:50:41,327 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:50:41,329 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46067, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:50:41,334 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T01:50:41,334 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:50:41,336 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C33525%2C1733709040728.meta, suffix=.meta, logDir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/WALs/ef6f18c58dc9,33525,1733709040728, archiveDir=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/oldWALs, maxLogs=32 2024-12-09T01:50:41,339 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C33525%2C1733709040728.meta.1733709041338.meta 2024-12-09T01:50:41,345 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/WALs/ef6f18c58dc9,33525,1733709040728/ef6f18c58dc9%2C33525%2C1733709040728.meta.1733709041338.meta 2024-12-09T01:50:41,347 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46473:46473),(127.0.0.1/127.0.0.1:34427:34427)] 2024-12-09T01:50:41,347 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:50:41,348 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T01:50:41,348 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T01:50:41,348 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T01:50:41,348 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T01:50:41,348 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:41,348 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T01:50:41,349 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T01:50:41,350 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:50:41,351 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:50:41,351 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:41,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:41,352 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:50:41,353 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:50:41,353 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:41,354 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:41,354 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:50:41,355 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:50:41,355 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:41,355 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:41,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:50:41,356 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:50:41,356 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:41,357 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T01:50:41,357 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:50:41,358 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740 2024-12-09T01:50:41,360 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740 2024-12-09T01:50:41,361 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:50:41,361 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:50:41,362 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T01:50:41,364 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:50:41,366 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=711866, jitterRate=-0.09481678903102875}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:50:41,366 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T01:50:41,367 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733709041349Writing region info on filesystem at 1733709041349Initializing all the Stores at 1733709041350 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709041350Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709041350Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709041350Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709041350Cleaning up temporary data from old regions at 1733709041361 (+11 ms)Running coprocessor post-open hooks at 1733709041366 (+5 ms)Region opened successfully at 1733709041367 (+1 ms) 2024-12-09T01:50:41,369 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733709041326 2024-12-09T01:50:41,372 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T01:50:41,372 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T01:50:41,373 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:41,375 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,33525,1733709040728, state=OPEN 2024-12-09T01:50:41,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:50:41,379 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:50:41,379 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:41,380 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:50:41,380 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:50:41,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T01:50:41,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,33525,1733709040728 in 207 msec 2024-12-09T01:50:41,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T01:50:41,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 372 msec 2024-12-09T01:50:41,388 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:50:41,388 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T01:50:41,390 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:50:41,390 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,33525,1733709040728, seqNum=-1] 2024-12-09T01:50:41,391 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:50:41,393 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35923, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:50:41,400 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 443 msec 2024-12-09T01:50:41,401 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733709041401, completionTime=-1 2024-12-09T01:50:41,401 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T01:50:41,401 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T01:50:41,404 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T01:50:41,404 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733709101404 2024-12-09T01:50:41,404 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733709161404 2024-12-09T01:50:41,404 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-09T01:50:41,404 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34721,1733709040675-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,405 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34721,1733709040675-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,405 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34721,1733709040675-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,405 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef6f18c58dc9:34721, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T01:50:41,405 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,405 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:41,408 DEBUG [master/ef6f18c58dc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T01:50:41,412 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.626sec 2024-12-09T01:50:41,412 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T01:50:41,412 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T01:50:41,412 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T01:50:41,412 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T01:50:41,412 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T01:50:41,412 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34721,1733709040675-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:50:41,412 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34721,1733709040675-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T01:50:41,416 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T01:50:41,416 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T01:50:41,416 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34721,1733709040675-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T01:50:41,459 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@668848ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:50:41,459 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef6f18c58dc9,34721,-1 for getting cluster id 2024-12-09T01:50:41,459 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T01:50:41,462 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9cb6e7ef-609a-427b-98a1-9e99301a6126' 2024-12-09T01:50:41,462 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T01:50:41,462 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9cb6e7ef-609a-427b-98a1-9e99301a6126" 2024-12-09T01:50:41,463 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@367dc70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:50:41,463 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef6f18c58dc9,34721,-1] 2024-12-09T01:50:41,464 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T01:50:41,464 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:41,466 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59754, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T01:50:41,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e5835fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:50:41,468 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:50:41,469 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,33525,1733709040728, seqNum=-1] 2024-12-09T01:50:41,470 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:50:41,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48974, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:50:41,474 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ef6f18c58dc9,34721,1733709040675 2024-12-09T01:50:41,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:41,479 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T01:50:41,479 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T01:50:41,479 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T01:50:41,480 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:50:41,480 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:41,480 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:41,480 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T01:50:41,480 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=131514615, stopped=false 2024-12-09T01:50:41,480 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef6f18c58dc9,34721,1733709040675 2024-12-09T01:50:41,480 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T01:50:41,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:50:41,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:50:41,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:41,482 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:41,483 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:50:41,483 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
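[Editor's note] The AsyncConnectionImpl call stacks above come from the test's teardown path: TestLogRolling.testLogRollOnDatanodeDeath completes, AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, and the shared connection and cluster are closed. A minimal sketch of that lifecycle follows, assuming HBaseTestingUtil's start/shutdown methods behave as the stack traces and the "Minicluster is up"/"Minicluster is down" messages suggest; the test body is a placeholder, not the actual TestLogRolling logic.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Sketch of the minicluster lifecycle visible in this log.
public class MiniClusterLifecycleSketch {

  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Spins up ZK, HDFS, one master and one region server, as in
    // "Minicluster is up; activeMaster=..." above.
    testUtil.startMiniCluster();
  }

  @Test
  public void testSomething() throws Exception {
    // Test body would create tables, write data, kill datanodes, etc.
  }

  @After
  public void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" / connection-close stacks seen above.
    testUtil.shutdownMiniCluster();
  }
}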
2024-12-09T01:50:41,483 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:50:41,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:41,483 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,33525,1733709040728' ***** 2024-12-09T01:50:41,483 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:50:41,484 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:50:41,484 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:50:41,484 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:50:41,484 INFO [RS:0;ef6f18c58dc9:33525 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:50:41,484 INFO [RS:0;ef6f18c58dc9:33525 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:50:41,484 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:50:41,484 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:41,484 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:50:41,484 INFO [RS:0;ef6f18c58dc9:33525 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef6f18c58dc9:33525. 2024-12-09T01:50:41,485 DEBUG [RS:0;ef6f18c58dc9:33525 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:50:41,485 DEBUG [RS:0;ef6f18c58dc9:33525 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:41,485 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-09T01:50:41,485 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:50:41,485 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:50:41,485 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T01:50:41,485 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T01:50:41,485 DEBUG [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T01:50:41,485 DEBUG [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T01:50:41,485 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:50:41,486 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:50:41,486 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:50:41,486 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:50:41,486 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:50:41,486 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-09T01:50:41,504 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740/.tmp/ns/f48f86ff8c7e484eae7c55a7ac89bd39 is 43, key is default/ns:d/1733709041393/Put/seqid=0 2024-12-09T01:50:41,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741835_1011 (size=5153) 2024-12-09T01:50:41,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741835_1011 (size=5153) 2024-12-09T01:50:41,512 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740/.tmp/ns/f48f86ff8c7e484eae7c55a7ac89bd39 2024-12-09T01:50:41,521 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740/.tmp/ns/f48f86ff8c7e484eae7c55a7ac89bd39 as hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740/ns/f48f86ff8c7e484eae7c55a7ac89bd39 2024-12-09T01:50:41,528 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740/ns/f48f86ff8c7e484eae7c55a7ac89bd39, entries=2, sequenceid=6, filesize=5.0 K 2024-12-09T01:50:41,530 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false 2024-12-09T01:50:41,530 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T01:50:41,537 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T01:50:41,538 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:50:41,538 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:50:41,539 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709041485Running coprocessor pre-close hooks at 1733709041485Disabling compacts and flushes for region at 1733709041485Disabling writes for close at 1733709041486 (+1 ms)Obtaining lock to block concurrent updates at 1733709041486Preparing flush snapshotting stores in 1588230740 at 1733709041486Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733709041486Flushing stores of hbase:meta,,1.1588230740 at 1733709041487 (+1 ms)Flushing 1588230740/ns: creating writer at 1733709041487Flushing 1588230740/ns: appending metadata at 1733709041504 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733709041504Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3b40ade6: reopening flushed file at 1733709041519 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 44ms, sequenceid=6, compaction requested=false at 1733709041530 (+11 ms)Writing region close event to WAL at 1733709041532 (+2 ms)Running coprocessor post-close hooks at 1733709041538 (+6 ms)Closed at 1733709041538 2024-12-09T01:50:41,539 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T01:50:41,686 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,33525,1733709040728; all regions closed. 
2024-12-09T01:50:41,687 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,687 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,687 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,687 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,687 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741834_1010 (size=1152) 2024-12-09T01:50:41,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741834_1010 (size=1152) 2024-12-09T01:50:41,693 DEBUG [RS:0;ef6f18c58dc9:33525 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/oldWALs 2024-12-09T01:50:41,693 INFO [RS:0;ef6f18c58dc9:33525 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C33525%2C1733709040728.meta:.meta(num 1733709041338) 2024-12-09T01:50:41,694 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,694 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,694 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,694 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,694 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741833_1009 (size=93) 2024-12-09T01:50:41,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741833_1009 (size=93) 2024-12-09T01:50:41,698 DEBUG [RS:0;ef6f18c58dc9:33525 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/oldWALs 2024-12-09T01:50:41,698 INFO [RS:0;ef6f18c58dc9:33525 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C33525%2C1733709040728:(num 1733709041151) 2024-12-09T01:50:41,698 DEBUG [RS:0;ef6f18c58dc9:33525 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:41,699 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:50:41,699 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:50:41,699 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T01:50:41,699 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:50:41,699 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
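[Editor's note] The entries above show the FSHLog sync runners being interrupted and the region server's WALs being closed and archived to oldWALs during shutdown. Since the surrounding test is TestLogRolling, a short sketch of how a test can force a WAL roll through the Admin API may be useful; it assumes Admin.rollWALWriter is the relevant call, and the helper and its arguments are illustrative.

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

// Sketch: force a WAL roll on a specific region server. Once a rolled WAL has no
// unflushed edits it becomes eligible for archiving, as in the
// "Moved 1 WAL file(s) to .../oldWALs" entries above.
public final class WalRollSketch {
  private WalRollSketch() {}

  static void rollWal(Connection conn, ServerName server) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // Asks the region server to close its current WAL and open a new one.
      admin.rollWALWriter(server);
    }
  }
}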
2024-12-09T01:50:41,699 INFO [RS:0;ef6f18c58dc9:33525 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33525 2024-12-09T01:50:41,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:50:41,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,33525,1733709040728 2024-12-09T01:50:41,702 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:50:41,703 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,33525,1733709040728] 2024-12-09T01:50:41,705 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,33525,1733709040728 already deleted, retry=false 2024-12-09T01:50:41,705 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,33525,1733709040728 expired; onlineServers=0 2024-12-09T01:50:41,705 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef6f18c58dc9,34721,1733709040675' ***** 2024-12-09T01:50:41,705 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T01:50:41,705 INFO [M:0;ef6f18c58dc9:34721 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:50:41,705 INFO [M:0;ef6f18c58dc9:34721 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:50:41,705 DEBUG [M:0;ef6f18c58dc9:34721 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T01:50:41,705 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T01:50:41,705 DEBUG [M:0;ef6f18c58dc9:34721 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T01:50:41,705 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709040964 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709040964,5,FailOnTimeoutGroup] 2024-12-09T01:50:41,706 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709040964 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709040964,5,FailOnTimeoutGroup] 2024-12-09T01:50:41,706 INFO [M:0;ef6f18c58dc9:34721 {}] hbase.ChoreService(370): Chore service for: master/ef6f18c58dc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T01:50:41,706 INFO [M:0;ef6f18c58dc9:34721 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:50:41,706 DEBUG [M:0;ef6f18c58dc9:34721 {}] master.HMaster(1795): Stopping service threads 2024-12-09T01:50:41,706 INFO [M:0;ef6f18c58dc9:34721 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T01:50:41,706 INFO [M:0;ef6f18c58dc9:34721 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:50:41,706 INFO [M:0;ef6f18c58dc9:34721 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T01:50:41,706 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T01:50:41,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T01:50:41,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:41,707 DEBUG [M:0;ef6f18c58dc9:34721 {}] zookeeper.ZKUtil(347): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T01:50:41,707 WARN [M:0;ef6f18c58dc9:34721 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T01:50:41,708 INFO [M:0;ef6f18c58dc9:34721 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/.lastflushedseqids 2024-12-09T01:50:41,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741836_1012 (size=99) 2024-12-09T01:50:41,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741836_1012 (size=99) 2024-12-09T01:50:41,722 INFO [M:0;ef6f18c58dc9:34721 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T01:50:41,722 INFO [M:0;ef6f18c58dc9:34721 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T01:50:41,722 DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:50:41,722 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:41,722 DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:41,722 DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:50:41,722 DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:41,722 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-09T01:50:41,742 DEBUG [M:0;ef6f18c58dc9:34721 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f89a7491a2104679b904177ca2dfa586 is 82, key is hbase:meta,,1/info:regioninfo/1733709041373/Put/seqid=0 2024-12-09T01:50:41,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741837_1013 (size=5672) 2024-12-09T01:50:41,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741837_1013 (size=5672) 2024-12-09T01:50:41,750 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f89a7491a2104679b904177ca2dfa586 2024-12-09T01:50:41,772 DEBUG [M:0;ef6f18c58dc9:34721 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ede398ffaaa64e30bf0690bcb2cfbd65 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733709041399/Put/seqid=0 2024-12-09T01:50:41,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741838_1014 (size=5275) 2024-12-09T01:50:41,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741838_1014 (size=5275) 2024-12-09T01:50:41,779 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ede398ffaaa64e30bf0690bcb2cfbd65 2024-12-09T01:50:41,800 DEBUG [M:0;ef6f18c58dc9:34721 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f45de26483fd48b18189fc9e90c3999e is 69, key is ef6f18c58dc9,33525,1733709040728/rs:state/1733709040986/Put/seqid=0 2024-12-09T01:50:41,803 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:50:41,803 INFO [RS:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:50:41,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33525-0x100747844f60001, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:50:41,804 INFO [RS:0;ef6f18c58dc9:33525 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,33525,1733709040728; zookeeper connection closed. 2024-12-09T01:50:41,804 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@67d06fdf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@67d06fdf 2024-12-09T01:50:41,804 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T01:50:41,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741839_1015 (size=5156) 2024-12-09T01:50:41,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741839_1015 (size=5156) 2024-12-09T01:50:41,807 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f45de26483fd48b18189fc9e90c3999e 2024-12-09T01:50:41,838 DEBUG [M:0;ef6f18c58dc9:34721 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b568d8dace2d46aebbbdc393b5f016a2 is 52, key is load_balancer_on/state:d/1733709041477/Put/seqid=0 2024-12-09T01:50:41,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741840_1016 (size=5056) 2024-12-09T01:50:41,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741840_1016 (size=5056) 2024-12-09T01:50:41,848 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b568d8dace2d46aebbbdc393b5f016a2 2024-12-09T01:50:41,855 DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f89a7491a2104679b904177ca2dfa586 as hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f89a7491a2104679b904177ca2dfa586 2024-12-09T01:50:41,862 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f89a7491a2104679b904177ca2dfa586, entries=8, sequenceid=29, filesize=5.5 K 2024-12-09T01:50:41,864 DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ede398ffaaa64e30bf0690bcb2cfbd65 as hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ede398ffaaa64e30bf0690bcb2cfbd65 2024-12-09T01:50:41,870 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ede398ffaaa64e30bf0690bcb2cfbd65, entries=3, sequenceid=29, filesize=5.2 K 2024-12-09T01:50:41,871 DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f45de26483fd48b18189fc9e90c3999e as hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f45de26483fd48b18189fc9e90c3999e 2024-12-09T01:50:41,877 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f45de26483fd48b18189fc9e90c3999e, entries=1, sequenceid=29, filesize=5.0 K 2024-12-09T01:50:41,879 DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b568d8dace2d46aebbbdc393b5f016a2 as hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b568d8dace2d46aebbbdc393b5f016a2 2024-12-09T01:50:41,885 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38277/user/jenkins/test-data/42d09ecd-701b-7b82-89f6-a89e3252f70b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b568d8dace2d46aebbbdc393b5f016a2, entries=1, sequenceid=29, filesize=4.9 K 2024-12-09T01:50:41,886 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 164ms, sequenceid=29, compaction requested=false 2024-12-09T01:50:41,892 INFO [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T01:50:41,892 DEBUG [M:0;ef6f18c58dc9:34721 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709041722Disabling compacts and flushes for region at 1733709041722Disabling writes for close at 1733709041722Obtaining lock to block concurrent updates at 1733709041722Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733709041722Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733709041723 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733709041724 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733709041724Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733709041741 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733709041742 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733709041756 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733709041772 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733709041772Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733709041784 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733709041800 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733709041800Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733709041814 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733709041837 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733709041837Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4623264b: reopening flushed file at 1733709041854 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4a96ad89: reopening flushed file at 1733709041863 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c768b73: reopening flushed file at 1733709041870 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1da4c297: reopening flushed file at 1733709041878 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 164ms, sequenceid=29, compaction requested=false at 1733709041886 (+8 ms)Writing region close event to WAL at 1733709041892 (+6 ms)Closed at 1733709041892 2024-12-09T01:50:41,893 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,893 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,893 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,893 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,893 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:41,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41297 is added to blk_1073741830_1006 (size=10311) 2024-12-09T01:50:41,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34405 is added to blk_1073741830_1006 (size=10311) 2024-12-09T01:50:41,897 INFO [M:0;ef6f18c58dc9:34721 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T01:50:41,897 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T01:50:41,897 INFO [M:0;ef6f18c58dc9:34721 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34721 2024-12-09T01:50:41,898 INFO [M:0;ef6f18c58dc9:34721 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:50:42,000 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:50:42,000 INFO [M:0;ef6f18c58dc9:34721 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:50:42,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34721-0x100747844f60000, quorum=127.0.0.1:64800, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:50:42,003 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b95e72c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:42,004 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@376d199b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:50:42,004 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:50:42,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@194f043a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:50:42,004 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f11c100{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.log.dir/,STOPPED} 2024-12-09T01:50:42,006 WARN [BP-250701132-172.17.0.2-1733709039698 heartbeating to localhost/127.0.0.1:38277 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:50:42,006 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:50:42,006 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:50:42,006 WARN [BP-250701132-172.17.0.2-1733709039698 heartbeating to localhost/127.0.0.1:38277 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-250701132-172.17.0.2-1733709039698 (Datanode Uuid c6676e61-aec8-42d0-bd92-d1534fefe8fe) service to localhost/127.0.0.1:38277 2024-12-09T01:50:42,007 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/data/data3/current/BP-250701132-172.17.0.2-1733709039698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:42,007 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/data/data4/current/BP-250701132-172.17.0.2-1733709039698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:42,008 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:50:42,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@641eaf99{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:42,013 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@276f8783{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:50:42,013 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:50:42,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69a0f3c6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:50:42,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48bb784e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.log.dir/,STOPPED} 2024-12-09T01:50:42,015 WARN [BP-250701132-172.17.0.2-1733709039698 heartbeating to localhost/127.0.0.1:38277 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:50:42,016 WARN [BP-250701132-172.17.0.2-1733709039698 heartbeating to localhost/127.0.0.1:38277 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-250701132-172.17.0.2-1733709039698 (Datanode Uuid 126cd986-b6f7-4136-90c3-b882c49e78f2) service to localhost/127.0.0.1:38277 2024-12-09T01:50:42,016 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:50:42,016 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:50:42,017 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/data/data1/current/BP-250701132-172.17.0.2-1733709039698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:42,017 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/cluster_c2874f64-cea7-5d0b-2735-033987403834/data/data2/current/BP-250701132-172.17.0.2-1733709039698 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:42,017 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:50:42,034 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1edca743{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:50:42,034 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c443180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:50:42,034 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:50:42,035 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59505eb5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:50:42,035 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46d26a79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.log.dir/,STOPPED} 2024-12-09T01:50:42,044 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T01:50:42,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T01:50:42,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T01:50:42,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.log.dir so I do NOT create it in target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9 2024-12-09T01:50:42,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/530dcfa0-388a-ff3b-5e4c-e1ff252bb3a0/hadoop.tmp.dir so I do NOT create it in target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9 
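[Editor's note] The final entries above show the utility starting a second minicluster with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, ...}. Below is a sketch of building an equivalent option object explicitly; it assumes StartMiniClusterOption exposes a builder whose method names mirror the fields in that toString, and the import locations are assumptions as well.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

// Sketch: start a minicluster with the same shape as the option printed above
// (1 master, 1 region server, 2 datanodes). Builder method names are assumed.
public final class StartOptionSketch {
  private StartOptionSketch() {}

  public static void main(String[] args) throws Exception {
    HBaseTestingUtil testUtil = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .build();
    testUtil.startMiniCluster(option);
    // ... run test logic against the cluster ...
    testUtil.shutdownMiniCluster();
  }
}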
2024-12-09T01:50:42,065 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b, deleteOnExit=true 2024-12-09T01:50:42,065 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T01:50:42,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/test.cache.data in system properties and HBase conf 2024-12-09T01:50:42,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T01:50:42,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir in system properties and HBase conf 2024-12-09T01:50:42,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T01:50:42,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T01:50:42,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T01:50:42,066 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/nfs.dump.dir in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/java.io.tmpdir in system properties and HBase conf 2024-12-09T01:50:42,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:50:42,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T01:50:42,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T01:50:42,082 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:50:42,161 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:42,167 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:50:42,171 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:50:42,171 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:50:42,171 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:50:42,172 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:42,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3e7d5546{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:50:42,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@142d24a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:50:42,297 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77cd23f0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/java.io.tmpdir/jetty-localhost-38435-hadoop-hdfs-3_4_1-tests_jar-_-any-8054009409810572319/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:50:42,297 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3ed3a961{HTTP/1.1, (http/1.1)}{localhost:38435} 2024-12-09T01:50:42,297 INFO [Time-limited test {}] server.Server(415): Started @105035ms 2024-12-09T01:50:42,316 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:50:42,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:42,393 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:50:42,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:50:42,394 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:50:42,394 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:50:42,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7183cb8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:50:42,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65dec1b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:50:42,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67cf8368{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/java.io.tmpdir/jetty-localhost-35813-hadoop-hdfs-3_4_1-tests_jar-_-any-15698101875946650282/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:42,515 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30e7c448{HTTP/1.1, (http/1.1)}{localhost:35813} 2024-12-09T01:50:42,516 INFO [Time-limited test {}] server.Server(415): Started @105253ms 2024-12-09T01:50:42,517 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:50:42,551 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:42,555 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:50:42,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:50:42,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:50:42,559 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:50:42,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7982676d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:50:42,561 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66182b08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:50:42,637 WARN [Thread-657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data1/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:42,637 WARN [Thread-658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data2/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:42,665 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:50:42,668 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24e71928b7b0c65c with lease ID 0x3209ef898166a60f: Processing first storage report for DS-a89c88c2-2971-4181-bab7-a51a0eddf136 from datanode DatanodeRegistration(127.0.0.1:34383, datanodeUuid=5698537a-b462-405f-a34c-4c158c19f051, infoPort=34095, infoSecurePort=0, ipcPort=44189, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:42,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24e71928b7b0c65c with lease ID 0x3209ef898166a60f: from storage DS-a89c88c2-2971-4181-bab7-a51a0eddf136 node DatanodeRegistration(127.0.0.1:34383, datanodeUuid=5698537a-b462-405f-a34c-4c158c19f051, infoPort=34095, infoSecurePort=0, ipcPort=44189, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:42,669 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x24e71928b7b0c65c with lease ID 0x3209ef898166a60f: Processing first storage report for DS-93dd9675-0c79-41ea-bd7e-6a3ca2223e4b from datanode DatanodeRegistration(127.0.0.1:34383, datanodeUuid=5698537a-b462-405f-a34c-4c158c19f051, infoPort=34095, infoSecurePort=0, ipcPort=44189, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:42,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x24e71928b7b0c65c with lease ID 0x3209ef898166a60f: from storage DS-93dd9675-0c79-41ea-bd7e-6a3ca2223e4b node DatanodeRegistration(127.0.0.1:34383, datanodeUuid=5698537a-b462-405f-a34c-4c158c19f051, infoPort=34095, infoSecurePort=0, ipcPort=44189, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T01:50:42,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@229a8eec{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/java.io.tmpdir/jetty-localhost-43493-hadoop-hdfs-3_4_1-tests_jar-_-any-1493542582656607043/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:42,692 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7720beab{HTTP/1.1, (http/1.1)}{localhost:43493} 2024-12-09T01:50:42,692 INFO [Time-limited test {}] server.Server(415): Started @105429ms 2024-12-09T01:50:42,693 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
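The startup above (mini DFS, the DataNodes, and their embedded Jetty UIs) is what HBaseTestingUtil produces when a test asks for a mini cluster. A minimal sketch of such a test follows; the class and method names track the hbase-server test utility referenced in this log (hbase.HBaseTestingUtil), but the exact signatures on the master branch are an assumption here, not something the log confirms.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterSetupSketch {
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Populates the per-test data directory (yarn.*, dfs.*, java.io.tmpdir, ...)
    // in system properties and the HBase conf, then starts mini DFS, ZooKeeper,
    // one master and one regionserver -- the sequence logged by HBaseTestingUtil above.
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster(); // tears the mini cluster down again
  }
}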
2024-12-09T01:50:42,801 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:42,801 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:42,825 WARN [Thread-672 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T01:50:42,828 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59c7bc55e89373a8 with lease ID 0x3209ef898166a610: Processing first storage report for DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1 from datanode DatanodeRegistration(127.0.0.1:46423, datanodeUuid=63d05511-8676-42d8-83ea-542fafbdd738, infoPort=45899, infoSecurePort=0, ipcPort=45959, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:42,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59c7bc55e89373a8 with lease ID 0x3209ef898166a610: from storage DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1 node DatanodeRegistration(127.0.0.1:46423, datanodeUuid=63d05511-8676-42d8-83ea-542fafbdd738, infoPort=45899, infoSecurePort=0, ipcPort=45959, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:42,828 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59c7bc55e89373a8 with lease ID 0x3209ef898166a610: Processing first storage report for DS-f3231ded-9564-4cf1-871c-41faa46f29da from datanode DatanodeRegistration(127.0.0.1:46423, datanodeUuid=63d05511-8676-42d8-83ea-542fafbdd738, infoPort=45899, infoSecurePort=0, ipcPort=45959, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:42,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59c7bc55e89373a8 with lease ID 0x3209ef898166a610: from storage DS-f3231ded-9564-4cf1-871c-41faa46f29da node DatanodeRegistration(127.0.0.1:46423, datanodeUuid=63d05511-8676-42d8-83ea-542fafbdd738, infoPort=45899, infoSecurePort=0, ipcPort=45959, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:42,922 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9 2024-12-09T01:50:42,926 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/zookeeper_0, clientPort=59768, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T01:50:42,927 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59768 2024-12-09T01:50:42,927 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:42,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:42,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46423 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:50:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34383 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:50:42,942 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b with version=8 2024-12-09T01:50:42,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/hbase-staging 2024-12-09T01:50:42,944 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:50:42,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:42,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:42,945 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:50:42,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:42,945 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:50:42,945 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T01:50:42,945 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:50:42,946 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46389 2024-12-09T01:50:42,948 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46389 connecting to ZooKeeper ensemble=127.0.0.1:59768 2024-12-09T01:50:42,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:463890x0, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:50:42,954 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46389-0x10074784dd30000 connected 2024-12-09T01:50:42,980 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:42,981 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:42,984 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:50:42,984 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b, hbase.cluster.distributed=false 2024-12-09T01:50:42,985 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:50:42,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46389 2024-12-09T01:50:42,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46389 2024-12-09T01:50:42,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46389 2024-12-09T01:50:42,989 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46389 2024-12-09T01:50:42,990 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46389 2024-12-09T01:50:43,006 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:50:43,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:43,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:43,006 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:50:43,006 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:43,007 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:50:43,007 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:50:43,007 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:50:43,008 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45353 2024-12-09T01:50:43,009 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45353 connecting to ZooKeeper ensemble=127.0.0.1:59768 2024-12-09T01:50:43,009 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:43,011 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:50:43,012 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:43,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:453530x0, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:50:43,017 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:50:43,017 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45353-0x10074784dd30001 connected 2024-12-09T01:50:43,017 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:50:43,018 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:50:43,019 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:50:43,020 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:50:43,024 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45353 2024-12-09T01:50:43,024 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45353 2024-12-09T01:50:43,025 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45353 2024-12-09T01:50:43,025 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45353 2024-12-09T01:50:43,026 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45353 2024-12-09T01:50:43,040 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef6f18c58dc9:46389 2024-12-09T01:50:43,040 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef6f18c58dc9,46389,1733709042944 2024-12-09T01:50:43,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:50:43,042 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:50:43,043 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef6f18c58dc9,46389,1733709042944 2024-12-09T01:50:43,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:50:43,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,046 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:50:43,047 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef6f18c58dc9,46389,1733709042944 from backup master directory 2024-12-09T01:50:43,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef6f18c58dc9,46389,1733709042944 2024-12-09T01:50:43,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:50:43,049 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
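The "Set watcher on znode that does not yet exist" lines above are the master/regionserver election handshake on the mini ZooKeeper ensemble at 127.0.0.1:59768. A hedged sketch of observing the same znodes with the plain Apache ZooKeeper client (rather than HBase's internal ZKWatcher) is shown below; the connect string and znode paths come from the log, everything else is illustrative.

import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the MiniZooKeeperCluster started above (clientPort=59768).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59768", 30000,
        event -> System.out.println("zk event: " + event));
    // A watch may be registered on a znode that does not exist yet, which is
    // what the "Set watcher on znode that does not yet exist" lines do for
    // /hbase/master and /hbase/running.
    zk.exists("/hbase/master", true);
    zk.exists("/hbase/running", true);
    Thread.sleep(10_000); // keep the session alive long enough to receive events
    zk.close();
  }
}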
2024-12-09T01:50:43,049 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef6f18c58dc9,46389,1733709042944 2024-12-09T01:50:43,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:50:43,054 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/hbase.id] with ID: 38cf29e5-1d03-4dcb-b1b4-6c4766dbeb5f 2024-12-09T01:50:43,054 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/.tmp/hbase.id 2024-12-09T01:50:43,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:50:43,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T01:50:43,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46423 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:50:43,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34383 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:50:43,061 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T01:50:43,061 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/.tmp/hbase.id]:[hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/hbase.id] 2024-12-09T01:50:43,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-12-09T01:50:43,075 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:43,075 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T01:50:43,077 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
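Once the master above registers as active and publishes hbase.id, the cluster ID becomes visible to clients. The hbase.id file itself is not plain text, so the usual way to confirm it is through the Admin API; the sketch below assumes the standard HBase client classes and reuses the ZooKeeper port from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class PrintClusterIdSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "59768"); // port from the log above
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Prints the ID written to .../hbase.id, e.g. 38cf29e5-1d03-4dcb-b1b4-6c4766dbeb5f
      System.out.println(admin.getClusterMetrics().getClusterId());
    }
  }
}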
2024-12-09T01:50:43,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46423 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:50:43,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34383 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:50:43,091 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:50:43,092 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T01:50:43,092 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:50:43,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46423 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:50:43,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34383 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:50:43,103 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store 2024-12-09T01:50:43,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46423 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:50:43,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34383 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:50:43,114 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:43,114 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:50:43,114 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:43,114 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:43,115 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:50:43,115 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:50:43,115 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
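The descriptor dumped above for the local master:store region is just a set of column families with per-family tuning. As a rough translation into the public builder API (the master region itself is created internally, so this is only an illustration), the info family would look like the sketch below; proc, rs and state use the plain defaults shown in the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}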
2024-12-09T01:50:43,115 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709043114Disabling compacts and flushes for region at 1733709043114Disabling writes for close at 1733709043115 (+1 ms)Writing region close event to WAL at 1733709043115Closed at 1733709043115 2024-12-09T01:50:43,116 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/.initializing 2024-12-09T01:50:43,116 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944 2024-12-09T01:50:43,119 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C46389%2C1733709042944, suffix=, logDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944, archiveDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/oldWALs, maxLogs=10 2024-12-09T01:50:43,119 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 2024-12-09T01:50:43,125 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 2024-12-09T01:50:43,127 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34095:34095),(127.0.0.1/127.0.0.1:45899:45899)] 2024-12-09T01:50:43,131 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:50:43,132 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:43,132 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,132 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,133 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,135 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T01:50:43,135 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:43,136 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,137 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T01:50:43,137 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:50:43,138 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,139 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T01:50:43,139 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:50:43,140 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T01:50:43,142 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,142 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:50:43,143 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,144 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,144 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,146 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,146 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,146 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T01:50:43,148 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:50:43,151 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:50:43,152 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=714533, jitterRate=-0.0914250910282135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:50:43,153 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733709043132Initializing all the Stores at 1733709043133 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709043133Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709043133Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709043133Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709043133Cleaning up temporary data from old regions at 1733709043146 (+13 ms)Region opened successfully at 1733709043153 (+7 ms) 2024-12-09T01:50:43,153 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T01:50:43,157 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@313b8b8d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:50:43,159 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T01:50:43,159 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T01:50:43,159 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T01:50:43,159 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T01:50:43,160 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T01:50:43,160 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T01:50:43,160 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T01:50:43,164 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T01:50:43,165 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T01:50:43,167 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T01:50:43,167 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T01:50:43,168 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T01:50:43,170 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T01:50:43,170 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T01:50:43,171 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T01:50:43,172 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T01:50:43,173 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T01:50:43,175 DEBUG 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T01:50:43,177 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T01:50:43,178 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T01:50:43,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:50:43,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:50:43,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,180 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef6f18c58dc9,46389,1733709042944, sessionid=0x10074784dd30000, setting cluster-up flag (Was=false) 2024-12-09T01:50:43,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,191 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T01:50:43,193 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,46389,1733709042944 2024-12-09T01:50:43,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,202 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T01:50:43,203 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,46389,1733709042944 2024-12-09T01:50:43,204 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T01:50:43,207 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T01:50:43,207 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T01:50:43,207 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T01:50:43,207 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef6f18c58dc9,46389,1733709042944 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T01:50:43,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:50:43,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:50:43,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:50:43,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:50:43,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef6f18c58dc9:0, corePoolSize=10, maxPoolSize=10 2024-12-09T01:50:43,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:50:43,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T01:50:43,210 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733709073210 2024-12-09T01:50:43,211 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T01:50:43,211 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T01:50:43,211 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T01:50:43,211 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T01:50:43,211 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T01:50:43,211 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T01:50:43,211 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:50:43,211 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T01:50:43,211 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T01:50:43,212 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T01:50:43,212 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T01:50:43,212 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T01:50:43,212 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,213 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T01:50:43,214 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T01:50:43,214 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T01:50:43,216 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709043215,5,FailOnTimeoutGroup] 2024-12-09T01:50:43,216 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709043216,5,FailOnTimeoutGroup] 2024-12-09T01:50:43,216 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,216 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T01:50:43,216 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,216 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34383 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:50:43,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46423 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:50:43,228 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T01:50:43,229 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b 2024-12-09T01:50:43,229 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(746): ClusterId : 38cf29e5-1d03-4dcb-b1b4-6c4766dbeb5f 2024-12-09T01:50:43,229 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:50:43,232 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:50:43,233 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:50:43,235 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.RegionServerProcedureManagerHost(45): 
Procedure online-snapshot initialized 2024-12-09T01:50:43,236 DEBUG [RS:0;ef6f18c58dc9:45353 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cf1fb71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:50:43,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46423 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:50:43,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34383 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:50:43,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:43,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:50:43,246 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:50:43,246 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:43,247 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:50:43,249 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:50:43,249 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,249 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:43,250 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:50:43,252 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:50:43,252 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:43,253 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:50:43,255 DEBUG [RS:0;ef6f18c58dc9:45353 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef6f18c58dc9:45353 2024-12-09T01:50:43,255 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:50:43,255 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 
2024-12-09T01:50:43,255 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:50:43,255 DEBUG [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T01:50:43,255 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,256 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:43,256 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:50:43,256 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,46389,1733709042944 with port=45353, startcode=1733709043006 2024-12-09T01:50:43,256 DEBUG [RS:0;ef6f18c58dc9:45353 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:50:43,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740 2024-12-09T01:50:43,257 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740 2024-12-09T01:50:43,259 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:50:43,259 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:50:43,260 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T01:50:43,261 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:50:43,265 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:50:43,265 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38705, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:50:43,265 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=717430, jitterRate=-0.08774159848690033}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:50:43,265 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46389 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:43,265 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46389 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:43,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733709043243Initializing all the Stores at 1733709043244 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709043244Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709043244Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709043244Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709043244Cleaning up temporary data from old regions at 1733709043259 (+15 ms)Region opened successfully at 1733709043267 (+8 ms) 2024-12-09T01:50:43,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:50:43,267 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:50:43,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 
2024-12-09T01:50:43,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:50:43,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:50:43,267 DEBUG [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b 2024-12-09T01:50:43,268 DEBUG [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42375 2024-12-09T01:50:43,268 DEBUG [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:50:43,268 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:50:43,268 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709043267Disabling compacts and flushes for region at 1733709043267Disabling writes for close at 1733709043267Writing region close event to WAL at 1733709043268 (+1 ms)Closed at 1733709043268 2024-12-09T01:50:43,269 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:50:43,270 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T01:50:43,270 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T01:50:43,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:50:43,272 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:50:43,272 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,45353,1733709043006] 2024-12-09T01:50:43,272 DEBUG [RS:0;ef6f18c58dc9:45353 {}] zookeeper.ZKUtil(111): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:43,273 WARN [RS:0;ef6f18c58dc9:45353 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T01:50:43,273 INFO [RS:0;ef6f18c58dc9:45353 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:50:43,273 DEBUG [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:43,273 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T01:50:43,279 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:50:43,284 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:50:43,285 INFO [RS:0;ef6f18c58dc9:45353 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:50:43,285 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,285 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:50:43,286 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:50:43,286 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T01:50:43,286 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,286 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,286 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,286 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,286 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,286 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:50:43,286 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,287 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,287 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,287 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,287 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,287 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:43,287 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:50:43,287 DEBUG [RS:0;ef6f18c58dc9:45353 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:50:43,287 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,287 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,287 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,287 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-09T01:50:43,288 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,288 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,45353,1733709043006-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:50:43,346 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:50:43,346 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,45353,1733709043006-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,346 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,346 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.Replication(171): ef6f18c58dc9,45353,1733709043006 started 2024-12-09T01:50:43,363 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,363 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,45353,1733709043006, RpcServer on ef6f18c58dc9/172.17.0.2:45353, sessionid=0x10074784dd30001 2024-12-09T01:50:43,364 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:50:43,364 DEBUG [RS:0;ef6f18c58dc9:45353 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:43,364 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,45353,1733709043006' 2024-12-09T01:50:43,364 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:50:43,365 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:50:43,375 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:50:43,375 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:50:43,375 DEBUG [RS:0;ef6f18c58dc9:45353 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:43,375 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,45353,1733709043006' 2024-12-09T01:50:43,375 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:50:43,376 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:50:43,377 DEBUG [RS:0;ef6f18c58dc9:45353 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:50:43,377 INFO [RS:0;ef6f18c58dc9:45353 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:50:43,377 INFO [RS:0;ef6f18c58dc9:45353 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-12-09T01:50:43,424 WARN [ef6f18c58dc9:46389 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T01:50:43,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:43,431 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:43,480 INFO [RS:0;ef6f18c58dc9:45353 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C45353%2C1733709043006, suffix=, logDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006, archiveDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/oldWALs, maxLogs=32 2024-12-09T01:50:43,481 INFO [RS:0;ef6f18c58dc9:45353 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 2024-12-09T01:50:43,489 INFO [RS:0;ef6f18c58dc9:45353 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 2024-12-09T01:50:43,493 DEBUG [RS:0;ef6f18c58dc9:45353 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45899:45899),(127.0.0.1/127.0.0.1:34095:34095)] 2024-12-09T01:50:43,674 DEBUG [ef6f18c58dc9:46389 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T01:50:43,674 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:43,676 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,45353,1733709043006, state=OPENING 2024-12-09T01:50:43,678 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T01:50:43,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:50:43,682 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:50:43,682 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:50:43,682 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,45353,1733709043006}] 2024-12-09T01:50:43,682 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:50:43,836 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:50:43,838 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42917, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:50:43,842 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T01:50:43,843 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:50:43,845 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C45353%2C1733709043006.meta, suffix=.meta, logDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006, archiveDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/oldWALs, maxLogs=32 2024-12-09T01:50:43,846 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta 2024-12-09T01:50:43,854 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta 2024-12-09T01:50:43,857 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45899:45899),(127.0.0.1/127.0.0.1:34095:34095)] 2024-12-09T01:50:43,858 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:50:43,859 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T01:50:43,859 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T01:50:43,859 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T01:50:43,859 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T01:50:43,859 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:43,859 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T01:50:43,859 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T01:50:43,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:50:43,862 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:50:43,862 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,863 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:43,863 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:50:43,864 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:50:43,864 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:43,866 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:50:43,867 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:50:43,867 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,867 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:50:43,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:50:43,868 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:50:43,868 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:43,869 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T01:50:43,869 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:50:43,870 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740 2024-12-09T01:50:43,871 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740 2024-12-09T01:50:43,872 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:50:43,873 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:50:43,873 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T01:50:43,875 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:50:43,876 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805725, jitterRate=0.024532347917556763}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:50:43,876 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T01:50:43,876 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733709043859Writing region info on filesystem at 1733709043859Initializing all the Stores at 1733709043861 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709043861Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709043861Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709043861Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709043861Cleaning up temporary data from old regions at 1733709043873 (+12 ms)Running coprocessor post-open hooks at 1733709043876 (+3 ms)Region opened successfully at 1733709043876 2024-12-09T01:50:43,878 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733709043836 2024-12-09T01:50:43,881 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T01:50:43,881 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T01:50:43,882 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:43,883 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,45353,1733709043006, state=OPEN 2024-12-09T01:50:43,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:50:43,889 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:50:43,889 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:43,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:50:43,890 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:50:43,893 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T01:50:43,893 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,45353,1733709043006 in 208 msec 2024-12-09T01:50:43,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T01:50:43,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 623 msec 2024-12-09T01:50:43,897 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:50:43,898 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T01:50:43,899 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:50:43,899 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,45353,1733709043006, seqNum=-1] 2024-12-09T01:50:43,900 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:50:43,901 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34587, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:50:43,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 701 msec 2024-12-09T01:50:43,909 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733709043909, completionTime=-1 2024-12-09T01:50:43,909 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T01:50:43,910 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T01:50:43,913 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T01:50:43,913 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733709103913 2024-12-09T01:50:43,913 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733709163913 2024-12-09T01:50:43,913 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-09T01:50:43,914 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,46389,1733709042944-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,914 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,46389,1733709042944-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,914 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,46389,1733709042944-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,914 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef6f18c58dc9:46389, period=300000, unit=MILLISECONDS is enabled. 
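The FlushLargeStoresPolicy DEBUG message earlier in this log notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set for hbase:meta, so the policy falls back to the memstore flush size divided by the number of column families (16.0 M here). A minimal, illustrative sketch of setting that bound explicitly through the standard HBase configuration API follows; the 64 MB value is an arbitrary example, not taken from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Standard HBase configuration chain (hbase-default.xml + hbase-site.xml).
    Configuration conf = HBaseConfiguration.create();

    // Explicit per-column-family flush lower bound (64 MB, illustrative only);
    // when unset, FlushLargeStoresPolicy derives a bound as shown in the log above.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 64L * 1024 * 1024);

    System.out.println(
        conf.getLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", -1));
  }
}
```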
2024-12-09T01:50:43,914 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,916 DEBUG [master/ef6f18c58dc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T01:50:43,920 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:43,924 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.875sec 2024-12-09T01:50:43,924 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T01:50:43,924 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T01:50:43,924 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T01:50:43,924 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T01:50:43,924 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T01:50:43,924 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,46389,1733709042944-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:50:43,924 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,46389,1733709042944-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T01:50:43,927 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T01:50:43,927 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T01:50:43,927 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,46389,1733709042944-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T01:50:43,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2efeeef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:50:43,929 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef6f18c58dc9,46389,-1 for getting cluster id 2024-12-09T01:50:43,930 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T01:50:43,931 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '38cf29e5-1d03-4dcb-b1b4-6c4766dbeb5f' 2024-12-09T01:50:43,932 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T01:50:43,932 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "38cf29e5-1d03-4dcb-b1b4-6c4766dbeb5f" 2024-12-09T01:50:43,932 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22f7d28a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:50:43,932 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef6f18c58dc9,46389,-1] 2024-12-09T01:50:43,932 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T01:50:43,933 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:50:43,935 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55068, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T01:50:43,936 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47d1c4f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:50:43,937 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:50:43,938 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,45353,1733709043006, seqNum=-1] 2024-12-09T01:50:43,938 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:50:43,940 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58046, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:50:43,941 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ef6f18c58dc9,46389,1733709042944 2024-12-09T01:50:43,941 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:43,942 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:50:43,944 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:43,944 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T01:50:43,960 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:50:43,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:43,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:43,960 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:50:43,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:50:43,960 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:50:43,960 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:50:43,960 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:50:43,961 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36919 2024-12-09T01:50:43,962 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36919 connecting to ZooKeeper ensemble=127.0.0.1:59768 2024-12-09T01:50:43,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:43,964 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:50:43,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:369190x0, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:50:43,970 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-12-09T01:50:43,970 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:369190x0, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-12-09T01:50:43,970 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKWatcher(635): regionserver:36919-0x10074784dd30002 connected 2024-12-09T01:50:43,971 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:43,971 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:50:43,974 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:43,975 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:43,976 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:50:43,977 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:36919-0x10074784dd30002, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:50:43,978 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36919-0x10074784dd30002, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:50:43,979 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36919 2024-12-09T01:50:43,980 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36919 2024-12-09T01:50:43,983 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36919 2024-12-09T01:50:43,984 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36919 2024-12-09T01:50:43,986 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36919 2024-12-09T01:50:43,987 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(746): ClusterId : 38cf29e5-1d03-4dcb-b1b4-6c4766dbeb5f 2024-12-09T01:50:43,987 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:50:43,991 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:50:43,991 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:50:43,993 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:50:43,994 DEBUG [RS:1;ef6f18c58dc9:36919 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47d8087b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:50:44,010 DEBUG [RS:1;ef6f18c58dc9:36919 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;ef6f18c58dc9:36919 2024-12-09T01:50:44,010 INFO [RS:1;ef6f18c58dc9:36919 {}] 
regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:50:44,010 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:50:44,010 DEBUG [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T01:50:44,011 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,46389,1733709042944 with port=36919, startcode=1733709043959 2024-12-09T01:50:44,011 DEBUG [RS:1;ef6f18c58dc9:36919 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:50:44,013 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48097, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:50:44,014 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46389 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,36919,1733709043959 2024-12-09T01:50:44,014 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46389 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,36919,1733709043959 2024-12-09T01:50:44,016 DEBUG [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b 2024-12-09T01:50:44,016 DEBUG [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42375 2024-12-09T01:50:44,016 DEBUG [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:50:44,018 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:50:44,018 DEBUG [RS:1;ef6f18c58dc9:36919 {}] zookeeper.ZKUtil(111): regionserver:36919-0x10074784dd30002, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,36919,1733709043959 2024-12-09T01:50:44,019 WARN [RS:1;ef6f18c58dc9:36919 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
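The ZNodeClearer warning just above fires because the HBASE_ZNODE_FILE environment variable is not exported in this test environment; deployment start scripts normally set it so a crashed server's ephemeral znode can be cleared promptly (hence the "Longer MTTR!" note). A tiny illustrative check of the same condition, not the actual ZNodeClearer code:

```java
public class ZnodeFileCheck {
  public static void main(String[] args) {
    // Start scripts normally export HBASE_ZNODE_FILE; when absent, HBase logs
    // the "znodes will not be cleared on crash" warning seen above.
    String znodeFile = System.getenv("HBASE_ZNODE_FILE");
    if (znodeFile == null || znodeFile.isEmpty()) {
      System.err.println("HBASE_ZNODE_FILE not set; znode will not be cleared on crash");
    } else {
      System.out.println("Server znode path would be recorded in " + znodeFile);
    }
  }
}
```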
2024-12-09T01:50:44,019 INFO [RS:1;ef6f18c58dc9:36919 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:50:44,019 DEBUG [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959 2024-12-09T01:50:44,019 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,36919,1733709043959] 2024-12-09T01:50:44,028 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:50:44,030 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:50:44,031 INFO [RS:1;ef6f18c58dc9:36919 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:50:44,031 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:44,032 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:50:44,033 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:50:44,033 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:50:44,033 DEBUG [RS:1;ef6f18c58dc9:36919 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:50:44,035 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:44,035 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:44,035 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:44,035 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:44,035 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:44,035 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36919,1733709043959-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:50:44,051 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:50:44,051 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36919,1733709043959-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:44,051 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:50:44,051 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.Replication(171): ef6f18c58dc9,36919,1733709043959 started 2024-12-09T01:50:44,065 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
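The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled" lines above come from ChoreService scheduling periodic background tasks on the region server. A self-contained sketch of the same pattern using only the public ScheduledChore/ChoreService API; the chore name, period, and stopper here are made up for illustration and are not the test's own chores.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal stopper; real servers pass the HMaster/HRegionServer instance.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    ChoreService choreService = new ChoreService("example");

    // Runs every 1000 ms, analogous to the CompactionChecker chore in the log above.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override
      protected void chore() {
        System.out.println("chore tick");
      }
    };
    choreService.scheduleChore(chore);

    Thread.sleep(3_000);
    choreService.shutdown();
  }
}
```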
2024-12-09T01:50:44,066 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,36919,1733709043959, RpcServer on ef6f18c58dc9/172.17.0.2:36919, sessionid=0x10074784dd30002 2024-12-09T01:50:44,066 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:50:44,066 DEBUG [RS:1;ef6f18c58dc9:36919 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,36919,1733709043959 2024-12-09T01:50:44,066 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,36919,1733709043959' 2024-12-09T01:50:44,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;ef6f18c58dc9:36919,5,FailOnTimeoutGroup] 2024-12-09T01:50:44,066 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:50:44,066 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-12-09T01:50:44,066 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:50:44,066 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T01:50:44,067 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:50:44,067 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:50:44,067 DEBUG [RS:1;ef6f18c58dc9:36919 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,36919,1733709043959 2024-12-09T01:50:44,067 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,36919,1733709043959' 2024-12-09T01:50:44,067 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:50:44,067 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:50:44,068 DEBUG [RS:1;ef6f18c58dc9:36919 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:50:44,068 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is ef6f18c58dc9,46389,1733709042944 2024-12-09T01:50:44,068 INFO [RS:1;ef6f18c58dc9:36919 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:50:44,068 INFO [RS:1;ef6f18c58dc9:36919 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-09T01:50:44,068 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@547d4f62 2024-12-09T01:50:44,068 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T01:50:44,071 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T01:50:44,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T01:50:44,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-09T01:50:44,072 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:50:44,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T01:50:44,075 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T01:50:44,076 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:44,076 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-12-09T01:50:44,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:50:44,077 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T01:50:44,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34383 is added to blk_1073741835_1011 (size=393) 2024-12-09T01:50:44,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46423 is added to blk_1073741835_1011 (size=393) 2024-12-09T01:50:44,090 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] 
regionserver.HRegion(7572): creating {ENCODED => 813a51a97e7f9ea6d5c277ef30637811, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b 2024-12-09T01:50:44,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46423 is added to blk_1073741836_1012 (size=76) 2024-12-09T01:50:44,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34383 is added to blk_1073741836_1012 (size=76) 2024-12-09T01:50:44,098 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:44,098 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 813a51a97e7f9ea6d5c277ef30637811, disabling compactions & flushes 2024-12-09T01:50:44,098 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:50:44,098 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:50:44,098 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. after waiting 0 ms 2024-12-09T01:50:44,098 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:50:44,098 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 
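The create-table request above carries deliberately tiny MAX_FILESIZE (786432) and MEMSTORE_FLUSHSIZE (8192) values, which is why TableDescriptorChecker logs the two "too small" warnings; shrinking them makes regions flush and roll quickly during the test. A hedged sketch of building an equivalent descriptor with the public client API follows; this is not the test's actual code, and cluster connection details are assumed to come from the local configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTinyTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          .setMaxFileSize(786432)       // triggers the MAX_FILESIZE warning above
          .setMemStoreFlushSize(8192)   // triggers the MEMSTORE_FLUSHSIZE warning above
          .build();
      // Corresponds to the pid=4 CreateTableProcedure tracked in the log above.
      admin.createTable(td);
    }
  }
}
```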
2024-12-09T01:50:44,098 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 813a51a97e7f9ea6d5c277ef30637811: Waiting for close lock at 1733709044098Disabling compacts and flushes for region at 1733709044098Disabling writes for close at 1733709044098Writing region close event to WAL at 1733709044098Closed at 1733709044098 2024-12-09T01:50:44,100 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T01:50:44,101 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733709044100"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733709044100"}]},"ts":"1733709044100"} 2024-12-09T01:50:44,103 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T01:50:44,105 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T01:50:44,105 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709044105"}]},"ts":"1733709044105"} 2024-12-09T01:50:44,107 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-12-09T01:50:44,108 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=813a51a97e7f9ea6d5c277ef30637811, ASSIGN}] 2024-12-09T01:50:44,110 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=813a51a97e7f9ea6d5c277ef30637811, ASSIGN 2024-12-09T01:50:44,111 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=813a51a97e7f9ea6d5c277ef30637811, ASSIGN; state=OFFLINE, location=ef6f18c58dc9,45353,1733709043006; forceNewPlan=false, retain=false 2024-12-09T01:50:44,171 INFO [RS:1;ef6f18c58dc9:36919 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C36919%2C1733709043959, suffix=, logDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959, archiveDir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/oldWALs, maxLogs=32 2024-12-09T01:50:44,172 INFO [RS:1;ef6f18c58dc9:36919 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 2024-12-09T01:50:44,228 INFO [RS:1;ef6f18c58dc9:36919 {}] wal.AbstractFSWAL(991): New 
WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 2024-12-09T01:50:44,256 DEBUG [RS:1;ef6f18c58dc9:36919 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34095:34095),(127.0.0.1/127.0.0.1:45899:45899)] 2024-12-09T01:50:44,262 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=813a51a97e7f9ea6d5c277ef30637811, regionState=OPENING, regionLocation=ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:44,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=813a51a97e7f9ea6d5c277ef30637811, ASSIGN because future has completed 2024-12-09T01:50:44,280 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 813a51a97e7f9ea6d5c277ef30637811, server=ef6f18c58dc9,45353,1733709043006}] 2024-12-09T01:50:44,441 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:50:44,441 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 813a51a97e7f9ea6d5c277ef30637811, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:50:44,442 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,442 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:50:44,442 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,442 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,447 INFO [StoreOpener-813a51a97e7f9ea6d5c277ef30637811-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,452 INFO [StoreOpener-813a51a97e7f9ea6d5c277ef30637811-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, 
min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 813a51a97e7f9ea6d5c277ef30637811 columnFamilyName info 2024-12-09T01:50:44,452 DEBUG [StoreOpener-813a51a97e7f9ea6d5c277ef30637811-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:50:44,453 INFO [StoreOpener-813a51a97e7f9ea6d5c277ef30637811-1 {}] regionserver.HStore(327): Store=813a51a97e7f9ea6d5c277ef30637811/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:50:44,453 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,454 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,455 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,455 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,455 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,460 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,468 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:50:44,468 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 813a51a97e7f9ea6d5c277ef30637811; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864551, jitterRate=0.0993335098028183}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T01:50:44,469 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:50:44,469 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 813a51a97e7f9ea6d5c277ef30637811: Running coprocessor pre-open hook at 1733709044442Writing region info on filesystem at 1733709044442Initializing all the Stores at 1733709044445 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709044445Cleaning up temporary data from old regions at 1733709044455 (+10 ms)Running coprocessor post-open hooks at 1733709044469 (+14 ms)Region opened successfully at 1733709044469 2024-12-09T01:50:44,472 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811., pid=6, masterSystemTime=1733709044435 2024-12-09T01:50:44,475 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:50:44,475 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:50:44,476 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=813a51a97e7f9ea6d5c277ef30637811, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,45353,1733709043006 2024-12-09T01:50:44,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 813a51a97e7f9ea6d5c277ef30637811, server=ef6f18c58dc9,45353,1733709043006 because future has completed 2024-12-09T01:50:44,486 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T01:50:44,486 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 813a51a97e7f9ea6d5c277ef30637811, server=ef6f18c58dc9,45353,1733709043006 in 201 msec 2024-12-09T01:50:44,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T01:50:44,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=813a51a97e7f9ea6d5c277ef30637811, ASSIGN in 378 msec 2024-12-09T01:50:44,491 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T01:50:44,491 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709044491"}]},"ts":"1733709044491"} 2024-12-09T01:50:44,494 INFO [PEWorker-2 {}] 
hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-12-09T01:50:44,495 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T01:50:44,497 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 423 msec 2024-12-09T01:50:49,361 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:50:49,363 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:49,389 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:49,391 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:49,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:50:49,404 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-12-09T01:50:53,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T01:50:53,060 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T01:50:53,061 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T01:50:53,061 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-12-09T01:50:53,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:50:53,062 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T01:50:53,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T01:50:53,062 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 
2024-12-09T01:50:54,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46389 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:50:54,106 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-12-09T01:50:54,106 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-12-09T01:50:54,109 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T01:50:54,109 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:50:54,122 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:54,125 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:50:54,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:50:54,126 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:50:54,126 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:50:54,126 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e308c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:50:54,127 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21ddd8f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:50:54,255 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ef889f8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/java.io.tmpdir/jetty-localhost-44653-hadoop-hdfs-3_4_1-tests_jar-_-any-15447298913963008018/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:54,255 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51f59516{HTTP/1.1, (http/1.1)}{localhost:44653} 2024-12-09T01:50:54,255 INFO [Time-limited test {}] server.Server(415): Started @116993ms 2024-12-09T01:50:54,257 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:50:54,303 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:54,305 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:50:54,306 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:50:54,306 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:50:54,306 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:50:54,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2281f2b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:50:54,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@83c4e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:50:54,353 WARN [Thread-830 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data6/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:54,353 WARN [Thread-829 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data5/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:54,378 WARN [Thread-809 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:50:54,383 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe548530fb96bc13 with lease ID 0x3209ef898166a611: Processing first storage report for DS-7053096a-bf37-4a5d-a16a-acd827b900db from datanode DatanodeRegistration(127.0.0.1:36535, datanodeUuid=7f1c8aa2-2801-4df3-a93d-21c09813d43a, infoPort=37827, infoSecurePort=0, ipcPort=32857, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:54,383 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe548530fb96bc13 with lease ID 0x3209ef898166a611: from storage DS-7053096a-bf37-4a5d-a16a-acd827b900db node DatanodeRegistration(127.0.0.1:36535, datanodeUuid=7f1c8aa2-2801-4df3-a93d-21c09813d43a, infoPort=37827, infoSecurePort=0, ipcPort=32857, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T01:50:54,383 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbe548530fb96bc13 with lease ID 0x3209ef898166a611: Processing first storage report for DS-131b135f-7884-4dad-8f8c-acf77a5e9c8f from datanode DatanodeRegistration(127.0.0.1:36535, datanodeUuid=7f1c8aa2-2801-4df3-a93d-21c09813d43a, infoPort=37827, infoSecurePort=0, ipcPort=32857, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:54,383 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbe548530fb96bc13 with lease ID 0x3209ef898166a611: from storage DS-131b135f-7884-4dad-8f8c-acf77a5e9c8f node DatanodeRegistration(127.0.0.1:36535, datanodeUuid=7f1c8aa2-2801-4df3-a93d-21c09813d43a, infoPort=37827, infoSecurePort=0, ipcPort=32857, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:54,427 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@58872ac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/java.io.tmpdir/jetty-localhost-43163-hadoop-hdfs-3_4_1-tests_jar-_-any-1381284882399724905/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:54,427 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3afd309b{HTTP/1.1, (http/1.1)}{localhost:43163} 2024-12-09T01:50:54,427 INFO [Time-limited test {}] server.Server(415): Started @117164ms 2024-12-09T01:50:54,429 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:50:54,466 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:50:54,470 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:50:54,472 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:50:54,472 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:50:54,472 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:50:54,473 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@719bbb9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:50:54,473 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc0bdb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:50:54,525 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:54,525 WARN [Thread-865 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:54,553 WARN [Thread-844 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:50:54,556 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a773e2becd4606 with lease ID 0x3209ef898166a612: Processing first storage report for DS-cb689a3a-d25d-4e77-a460-d129bf73e88f from datanode DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:54,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a773e2becd4606 with lease ID 0x3209ef898166a612: from storage DS-cb689a3a-d25d-4e77-a460-d129bf73e88f node DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:54,556 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4a773e2becd4606 with lease ID 0x3209ef898166a612: Processing first storage report for DS-20ec7d7d-fa15-411d-b56b-b8afb5e230de from datanode DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:54,556 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4a773e2becd4606 with lease ID 0x3209ef898166a612: from storage DS-20ec7d7d-fa15-411d-b56b-b8afb5e230de node DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:54,598 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5547c7e1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/java.io.tmpdir/jetty-localhost-34597-hadoop-hdfs-3_4_1-tests_jar-_-any-2371494642675181086/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:54,598 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78e445ac{HTTP/1.1, (http/1.1)}{localhost:34597} 2024-12-09T01:50:54,598 INFO [Time-limited test {}] server.Server(415): Started @117335ms 2024-12-09T01:50:54,599 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T01:50:54,714 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data10/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:54,714 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data9/current/BP-1767086187-172.17.0.2-1733709042101/current, will proceed with Du for space computation calculation, 2024-12-09T01:50:54,737 WARN [Thread-879 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T01:50:54,740 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e2be8b7e9eccbce with lease ID 0x3209ef898166a613: Processing first storage report for DS-6cdd1297-2841-4193-aeb4-81deb67dacc0 from datanode DatanodeRegistration(127.0.0.1:33411, datanodeUuid=c4bbab35-2bdb-4dee-9b0f-18c8e00621d3, infoPort=42671, infoSecurePort=0, ipcPort=40069, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:54,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e2be8b7e9eccbce with lease ID 0x3209ef898166a613: from storage DS-6cdd1297-2841-4193-aeb4-81deb67dacc0 node DatanodeRegistration(127.0.0.1:33411, datanodeUuid=c4bbab35-2bdb-4dee-9b0f-18c8e00621d3, infoPort=42671, infoSecurePort=0, ipcPort=40069, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:54,740 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5e2be8b7e9eccbce with lease ID 0x3209ef898166a613: Processing first storage report for DS-d9cffe23-7397-4802-a956-524420f76e29 from datanode DatanodeRegistration(127.0.0.1:33411, datanodeUuid=c4bbab35-2bdb-4dee-9b0f-18c8e00621d3, infoPort=42671, infoSecurePort=0, ipcPort=40069, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101) 2024-12-09T01:50:54,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5e2be8b7e9eccbce with lease ID 0x3209ef898166a613: from storage DS-d9cffe23-7397-4802-a956-524420f76e29 node DatanodeRegistration(127.0.0.1:33411, datanodeUuid=c4bbab35-2bdb-4dee-9b0f-18c8e00621d3, infoPort=42671, infoSecurePort=0, ipcPort=40069, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:50:54,823 WARN [ResponseProcessor for block BP-1767086187-172.17.0.2-1733709042101:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1767086187-172.17.0.2-1733709042101:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,823 WARN [ResponseProcessor for block BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,824 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 block BP-1767086187-172.17.0.2-1733709042101:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:50:54,826 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1767086187-172.17.0.2-1733709042101 (Datanode Uuid 63d05511-8676-42d8-83ea-542fafbdd738) service to localhost/127.0.0.1:42375 2024-12-09T01:50:54,827 WARN [ResponseProcessor for block BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,828 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta block BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 
2024-12-09T01:50:54,828 WARN [ResponseProcessor for block BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,828 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 block BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:50:54,829 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 block BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:50:54,828 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:47562 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46423:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47562 dst: /127.0.0.1:46423 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,830 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:54,830 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:47568 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:46423:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47568 dst: /127.0.0.1:46423 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37106 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34383:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37106 dst: /127.0.0.1:34383 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,830 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:54,831 WARN [PacketResponder: BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46423] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] 
at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,831 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37116 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34383:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37116 dst: /127.0.0.1:34383 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,832 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-925780710_22 at /127.0.0.1:37078 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34383:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37078 dst: /127.0.0.1:34383 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,831 WARN [PacketResponder: BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46423] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,833 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1472031258_22 at /127.0.0.1:37148 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:34383:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37148 dst: /127.0.0.1:34383 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,833 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-925780710_22 at /127.0.0.1:47518 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:46423:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47518 dst: /127.0.0.1:46423 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,833 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1472031258_22 at /127.0.0.1:47606 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:46423:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47606 dst: /127.0.0.1:46423 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:50:54,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@229a8eec{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:54,841 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7720beab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:50:54,842 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:50:54,842 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66182b08{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:50:54,842 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7982676d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,STOPPED} 2024-12-09T01:50:54,843 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:50:54,843 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 block BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,852 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@50790d86 {}] datanode.DataXceiver(331): 127.0.0.1:34383:DataXceiver error processing unknown operation src: /127.0.0.1:55260 dst: /127.0.0.1:34383 java.io.IOException: Server closed. 
at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:54,853 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 block BP-1767086187-172.17.0.2-1733709042101:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,853 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta block BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T01:50:54,854 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 block BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,861 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67cf8368{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:54,861 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30e7c448{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:50:54,862 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:50:54,862 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65dec1b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:50:54,862 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7183cb8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,STOPPED} 2024-12-09T01:50:54,864 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:50:54,864 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:50:54,864 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:50:54,864 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1767086187-172.17.0.2-1733709042101 (Datanode Uuid 5698537a-b462-405f-a34c-4c158c19f051) service to localhost/127.0.0.1:42375 2024-12-09T01:50:54,864 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data1/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:54,865 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data2/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:54,865 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:50:54,870 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811., hostname=ef6f18c58dc9,45353,1733709043006, seqNum=2] 2024-12-09T01:50:54,873 ERROR [FSHLog-0-hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b-prefix:ef6f18c58dc9,45353,1733709043006 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,873 WARN [FSHLog-0-hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b-prefix:ef6f18c58dc9,45353,1733709043006 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,873 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,874 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C45353%2C1733709043006:(num 1733709043481) roll requested 2024-12-09T01:50:54,874 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C45353%2C1733709043006.1733709054874 2024-12-09T01:50:54,897 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:54,897 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:54,897 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:54,897 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:54,897 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:54,898 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709054874 2024-12-09T01:50:54,898 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,899 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:54,900 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-09T01:50:54,900 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-09T01:50:54,900 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 2024-12-09T01:50:54,902 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42671:42671),(127.0.0.1/127.0.0.1:37827:37827)] 2024-12-09T01:50:54,902 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 is not closed yet, will try archiving it next time 2024-12-09T01:50:54,905 WARN [IPC Server handler 1 on default port 42375 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-12-09T01:50:54,911 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 after 8ms 2024-12-09T01:50:55,093 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:56,034 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:56,905 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:56,907 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709054874 2024-12-09T01:50:56,907 WARN [ResponseProcessor for block BP-1767086187-172.17.0.2-1733709042101:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1767086187-172.17.0.2-1733709042101:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:56,908 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709054874 block BP-1767086187-172.17.0.2-1733709042101:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:50:56,908 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:42810 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:33411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42810 dst: /127.0.0.1:33411 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:56,908 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:53456 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36535:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53456 dst: /127.0.0.1:36535 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:56,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5547c7e1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:50:56,910 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78e445ac{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:50:56,910 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:50:56,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc0bdb3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:50:56,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@719bbb9b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,STOPPED} 2024-12-09T01:50:56,911 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T01:50:56,911 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:50:56,911 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:50:56,911 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1767086187-172.17.0.2-1733709042101 (Datanode Uuid c4bbab35-2bdb-4dee-9b0f-18c8e00621d3) service to localhost/127.0.0.1:42375 2024-12-09T01:50:56,912 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data9/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:56,912 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data10/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:50:56,912 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:50:57,094 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:58,035 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:58,905 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:58,906 WARN [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]] 2024-12-09T01:50:58,906 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C45353%2C1733709043006:(num 1733709054874) roll requested 2024-12-09T01:50:58,906 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C45353%2C1733709043006.1733709058906 2024-12-09T01:50:58,909 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:58,909 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:50:58,909 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741839_1021 2024-12-09T01:50:58,911 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 after 4011ms 2024-12-09T01:50:58,913 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] 2024-12-09T01:50:58,918 WARN [Thread-911 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:58,918 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:53488 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741840_1022] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data6]'}, localName='127.0.0.1:36535', datanodeUuid='7f1c8aa2-2801-4df3-a93d-21c09813d43a', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741840_1022 to mirror 127.0.0.1:33411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:50:58,918 WARN [Thread-911 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:50:58,918 WARN [Thread-911 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741840_1022 2024-12-09T01:50:58,918 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:53488 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741840_1022] {}] datanode.BlockReceiver(316): Block 1073741840 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T01:50:58,918 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:53488 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:36535:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53488 dst: /127.0.0.1:36535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
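The createBlockOutputStream failures above follow one pattern: the client connects to the next proposed pipeline, hits Connection refused (or an ERROR ack naming a firstBadLink), abandons the block, excludes the offending datanode, and asks for a fresh pipeline. The Java sketch below illustrates that exclude-and-retry loop in isolation; PipelineAllocator, NameNodeStub and tryConnect are hypothetical stand-ins, not the real DataStreamer internals.

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Minimal sketch of the exclude-and-retry pattern seen in the log: each failed
// pipeline attempt abandons the block, marks the bad node as excluded, and
// requests a new pipeline without it.
public class PipelineAllocator {

    /** Hypothetical stand-in for asking the namenode for a pipeline. */
    interface NameNodeStub {
        List<String> allocatePipeline(Set<String> excludedNodes);
    }

    /** Hypothetical stand-in for opening the block output stream to the first node. */
    interface ConnectFn {
        boolean tryConnect(String datanode); // false ~ "Connection refused" / ERROR ack
    }

    static List<String> setupPipelineForCreate(NameNodeStub nn, ConnectFn conn, int maxAttempts) {
        Set<String> excluded = new HashSet<>();
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            List<String> pipeline = nn.allocatePipeline(excluded);
            if (pipeline.isEmpty()) {
                break; // nothing left to try
            }
            String firstNode = pipeline.get(0);
            if (conn.tryConnect(firstNode)) {
                return pipeline; // pipeline established
            }
            // Mirrors "Abandoning blk_..." + "Excluding datanode ..." in the log.
            System.out.println("Abandoning block, excluding datanode " + firstNode);
            excluded.add(firstNode);
        }
        throw new IllegalStateException("All candidate datanodes excluded; aborting");
    }

    public static void main(String[] args) {
        List<String> all = new ArrayList<>(List.of("dn1:9866", "dn2:9866", "dn3:9866"));
        NameNodeStub nn = excludedNodes -> {
            List<String> out = new ArrayList<>(all);
            out.removeAll(excludedNodes);
            return out;
        };
        ConnectFn conn = dn -> dn.equals("dn3:9866"); // only dn3 is still reachable
        System.out.println("pipeline = " + setupPipelineForCreate(nn, conn, 5));
    }
}
```

In the run above the same loop plays out twice in a row (blk_1073741839 and blk_1073741840) before a two-node pipeline finally comes up.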
2024-12-09T01:50:58,919 WARN [Thread-911 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:50:58,920 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T01:50:58,923 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:58,923 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:58,923 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:58,924 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:58,924 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:50:58,924 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709054874 with entries=3, filesize=3.51 KB; new WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709058906 2024-12-09T01:50:58,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36535 is added to blk_1073741838_1020 (size=3600) 2024-12-09T01:50:58,931 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37827:37827),(127.0.0.1/127.0.0.1:42515:42515)] 2024-12-09T01:50:58,931 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 is not closed yet, will try archiving it next time 2024-12-09T01:50:58,931 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709054874 is not closed yet, will try archiving it next time 2024-12-09T01:50:59,094 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:50:59,327 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 is not closed yet, will try archiving it next time 2024-12-09T01:51:00,035 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,395 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@26d56e9a[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36535, datanodeUuid=7f1c8aa2-2801-4df3-a93d-21c09813d43a, infoPort=37827, infoSecurePort=0, ipcPort=32857, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101):Failed to transfer BP-1767086187-172.17.0.2-1733709042101:blk_1073741838_1020 to 127.0.0.1:33411 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:00,924 WARN [ResponseProcessor for block BP-1767086187-172.17.0.2-1733709042101:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1767086187-172.17.0.2-1733709042101:blk_1073741841_1023 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,924 WARN [DataStreamer for file /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709058906 block BP-1767086187-172.17.0.2-1733709042101:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 
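The ResponseProcessor exceptions above ("Unexpected EOF while trying to read response from server") show the client side of a dying pipeline: the ack stream from the first datanode ends early, and that node is then blamed ("datanode 0 ... is bad") so pipeline recovery can start. Below is a minimal sketch of that behaviour, assuming a fixed-size ack and hypothetical names (AckReaderSketch, readAck) rather than the real PipelineAck wire format.

```java
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;

// Minimal sketch of the ack-reader behaviour visible above: the response
// processor blocks on the downstream ack stream, and a premature EOF is
// translated into "datanode 0 ... is bad" so pipeline recovery can begin.
public class AckReaderSketch {

    /** Read a fixed-size ack; a short read means the downstream node went away. */
    static byte[] readAck(InputStream in, int ackLen) throws IOException {
        byte[] buf = new byte[ackLen];
        new DataInputStream(in).readFully(buf); // throws EOFException on premature EOF
        return buf;
    }

    public static void main(String[] args) {
        List<String> pipeline = List.of("127.0.0.1:36535", "127.0.0.1:35213");
        // Simulate a connection that dies after 2 of the expected 4 ack bytes.
        InputStream dyingSocket = new ByteArrayInputStream(new byte[] {1, 2});
        try {
            readAck(dyingSocket, 4);
        } catch (EOFException e) {
            // Mirrors the log: the first node in the pipeline is blamed and recovery begins.
            System.out.println("Unexpected EOF while reading ack; marking datanode 0 ("
                + pipeline.get(0) + ") as bad, starting pipeline recovery");
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}
```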
2024-12-09T01:51:00,924 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:53490 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:36535:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53490 dst: /127.0.0.1:36535 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:00,925 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34606 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34606 dst: /127.0.0.1:35213 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:00,926 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ef889f8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:00,926 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51f59516{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:00,926 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:00,926 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21ddd8f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:00,926 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e308c17{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:00,928 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:00,928 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1767086187-172.17.0.2-1733709042101 (Datanode Uuid 7f1c8aa2-2801-4df3-a93d-21c09813d43a) service to localhost/127.0.0.1:42375 2024-12-09T01:51:00,928 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
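The burst of "interrupted" warnings above is the normal shutdown path when the test stops a datanode: long-lived worker threads (the command processor, the incremental block-report manager, the refreshUsed disk-usage threads) are parked in sleep or wait, receive an interrupt, log once and exit. A small sketch of that pattern, with illustrative names only:

```java
// Minimal sketch of the shutdown pattern behind the "interrupted" warnings above:
// long-lived service threads sleep between units of work and treat an interrupt
// as the signal to log once and exit cleanly.
public class RefreshWorkerSketch {

    static Thread startRefresher(String name, long intervalMillis) {
        Thread t = new Thread(() -> {
            while (!Thread.currentThread().isInterrupted()) {
                try {
                    // Placeholder for the periodic work (e.g. refreshing disk usage).
                    Thread.sleep(intervalMillis);
                } catch (InterruptedException ie) {
                    // Mirrors "Thread Interrupted waiting to refresh disk information".
                    System.out.println(name + ": interrupted while waiting, ending service");
                    Thread.currentThread().interrupt(); // preserve the interrupt status
                    return;
                }
            }
        }, name);
        t.start();
        return t;
    }

    public static void main(String[] args) throws InterruptedException {
        Thread refresher = startRefresher("refreshUsed-data5", 30_000);
        Thread.sleep(100);        // let it park in sleep()
        refresher.interrupt();    // datanode shutdown interrupts its workers
        refresher.join();
    }
}
```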
2024-12-09T01:51:00,928 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:00,929 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data6/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:00,929 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data5/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:00,929 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:00,931 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,931 WARN [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]] 2024-12-09T01:51:00,931 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C45353%2C1733709043006:(num 1733709058906) roll requested 2024-12-09T01:51:00,932 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C45353%2C1733709043006.1733709060931 2024-12-09T01:51:00,934 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,935 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK], DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:00,935 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741842_1025 2024-12-09T01:51:00,935 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:00,937 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34383 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,937 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34616 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741843_1026 to mirror 127.0.0.1:34383 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:00,938 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34616 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T01:51:00,938 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:00,938 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741843_1026 2024-12-09T01:51:00,938 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34616 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34616 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
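The low-replication check logged above ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") is what drives these rolls: the writer watches how many datanodes remain in its current output pipeline and requests a new WAL file once that count drops below the configured minimum. A minimal sketch of that check, assuming a hypothetical RollRequester callback rather than FSHLog's actual fields:

```java
import java.util.List;

// Minimal sketch of the replica check behind "Found 1 replicas but expecting
// no less than 2 replicas. Requesting close of WAL."
public class WalPipelineCheckSketch {

    interface RollRequester {
        void requestRoll(String reason);
    }

    /** Returns true when a roll was requested because the pipeline shrank too far. */
    static boolean checkLowReplication(List<String> currentPipeline,
                                       int minReplicas,
                                       RollRequester roller) {
        if (currentPipeline.size() < minReplicas) {
            roller.requestRoll("Found " + currentPipeline.size()
                + " replicas but expecting no less than " + minReplicas
                + " replicas. Requesting close of WAL. current pipeline: " + currentPipeline);
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        List<String> pipeline = List.of("DatanodeInfoWithStorage[127.0.0.1:35213,...]");
        checkLowReplication(pipeline, 2, reason -> System.out.println("WARN " + reason));
    }
}
```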
2024-12-09T01:51:00,938 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:00,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45353 {}] regionserver.HRegion(8855): Flush requested on 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:51:00,940 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 813a51a97e7f9ea6d5c277ef30637811 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T01:51:00,942 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34630 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741844_1027 to mirror 127.0.0.1:46423 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:00,942 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34630 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T01:51:00,942 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46423 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,942 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34630 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34630 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:00,942 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:51:00,942 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741844_1027 2024-12-09T01:51:00,944 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] 2024-12-09T01:51:00,945 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
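The "Flush requested on 813a51a97e7f9ea6d5c277ef30637811" / "Flushing ... dataSize=7.36 KB" pair above reflects size-triggered flushing: writes accumulate in the region's in-memory store, and once the tracked size crosses a threshold a flush of that snapshot to a store file is requested. A toy sketch of the bookkeeping, using an illustrative threshold and method names that are not HRegion's API:

```java
// Minimal sketch of size-triggered flushing: writes accumulate until a
// threshold is crossed, then the accumulated data is snapshotted and written out.
public class MemStoreFlushSketch {

    private long dataSizeBytes = 0;
    private final long flushThresholdBytes;

    MemStoreFlushSketch(long flushThresholdBytes) {
        this.flushThresholdBytes = flushThresholdBytes;
    }

    /** Record a write; return true when the accumulated size calls for a flush. */
    boolean add(byte[] cell) {
        dataSizeBytes += cell.length;
        return dataSizeBytes >= flushThresholdBytes;
    }

    /** Snapshot and reset, as a stand-in for writing the snapshot to a store file. */
    long flush() {
        long flushed = dataSizeBytes;
        dataSizeBytes = 0;
        System.out.printf("Flushing 1/1 column families, dataSize=%.2f KB%n", flushed / 1024.0);
        return flushed;
    }

    public static void main(String[] args) {
        MemStoreFlushSketch store = new MemStoreFlushSketch(7 * 1024); // ~7 KB for the demo
        byte[] cell = new byte[1080]; // roughly the cell size reported later in this log
        boolean flushRequested = false;
        for (int i = 0; i < 8 && !flushRequested; i++) {
            flushRequested = store.add(cell);
        }
        if (flushRequested) {
            store.flush();
        }
    }
}
```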
2024-12-09T01:51:00,945 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:00,945 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741845_1028 2024-12-09T01:51:00,946 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:00,947 WARN [IPC Server handler 3 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:00,947 WARN [IPC Server handler 3 on default port 42375 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:00,947 WARN [IPC Server handler 3 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:00,950 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:00,950 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:00,950 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:00,950 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:00,950 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:00,950 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709058906 with entries=7, filesize=7.25 KB; new WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709060931 2024-12-09T01:51:00,951 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42515:42515)] 2024-12-09T01:51:00,951 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 is not closed yet, will try archiving it next time 2024-12-09T01:51:00,951 
DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709058906 is not closed yet, will try archiving it next time 2024-12-09T01:51:00,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741841_1024 (size=7430) 2024-12-09T01:51:00,953 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 is not closed yet, will try archiving it next time 2024-12-09T01:51:00,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/349a0c6f7d8649f7b68fd79a5d78bc21 is 1080, key is row0002/info:/1733709056914/Put/seqid=0 2024-12-09T01:51:00,969 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,969 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:00,970 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741847_1030 2024-12-09T01:51:00,970 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:00,971 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,971 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:51:00,971 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741848_1031 2024-12-09T01:51:00,972 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] 2024-12-09T01:51:00,973 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,973 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:00,973 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741849_1032 2024-12-09T01:51:00,974 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:00,976 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:00,976 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34646 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741850_1033 to mirror 127.0.0.1:36535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:00,976 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:00,976 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741850_1033 2024-12-09T01:51:00,976 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34646 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:00,977 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34646 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34646 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:00,977 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:00,977 WARN [IPC Server handler 2 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:00,978 WARN [IPC Server handler 2 on default port 42375 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:00,978 WARN [IPC Server handler 2 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:00,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741851_1034 (size=10347) 2024-12-09T01:51:01,095 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
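Editor's note: the entries above all follow one client-side pattern: the HDFS DataStreamer tries to open a block output stream, the connection to a stopped datanode is refused, the block is abandoned, the failed datanode is excluded, and a new block is requested, until either a pipeline succeeds or no candidates remain ("All datanodes ... are bad. Aborting..."). The standalone Java sketch below is an added illustration of that exclude-and-retry loop under simplified assumptions; PipelineSimulator, tryCreatePipeline and writeBlock are hypothetical names, and the code uses only the JDK rather than the real org.apache.hadoop.hdfs.DataStreamer API.

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

/**
 * Hypothetical, simplified illustration of the exclude-and-retry pattern
 * visible in the log: on a connect failure, abandon the block, exclude the
 * failed datanode, and request a new pipeline until no candidates remain.
 * This is NOT the real org.apache.hadoop.hdfs.DataStreamer implementation.
 */
public class PipelineSimulator {

    /** Datanodes that refuse connections in this simulated cluster. */
    private final Set<String> deadNodes;

    PipelineSimulator(Set<String> deadNodes) {
        this.deadNodes = deadNodes;
    }

    /** Pretend to open a block output stream to the first node of the pipeline. */
    private boolean tryCreatePipeline(List<String> pipeline) {
        String first = pipeline.get(0);
        if (deadNodes.contains(first)) {
            System.out.println("WARN  Exception in createBlockOutputStream: connection to "
                + first + " refused; datanode " + first + " is bad.");
            return false;
        }
        System.out.println("INFO  pipeline established: " + pipeline);
        return true;
    }

    /** Request pipelines, excluding nodes that already failed, until one works or none are left. */
    boolean writeBlock(List<String> allNodes, int replication) {
        Set<String> excluded = new HashSet<>();
        while (true) {
            List<String> pipeline = new ArrayList<>();
            for (String node : allNodes) {
                if (!excluded.contains(node)) {
                    pipeline.add(node);
                }
                if (pipeline.size() == replication) {
                    break;
                }
            }
            if (pipeline.isEmpty()) {
                System.out.println("ERROR All datanodes are bad. Aborting...");
                return false;
            }
            if (tryCreatePipeline(pipeline)) {
                return true;
            }
            // Mirror the log: abandon the block and exclude the failed first node.
            System.out.println("WARN  Abandoning block; excluding datanode " + pipeline.get(0));
            excluded.add(pipeline.get(0));
        }
    }

    public static void main(String[] args) {
        Set<String> dead = new HashSet<>(List.of("127.0.0.1:34383", "127.0.0.1:46423",
            "127.0.0.1:33411", "127.0.0.1:36535"));
        List<String> all = new ArrayList<>(dead);
        all.add("127.0.0.1:35213"); // the one datanode that is still reachable
        new PipelineSimulator(dead).writeBlock(all, 2);
    }
}
```

Run with four unreachable addresses and one live one, the sketch ends, like the test above, with a single-replica pipeline on the surviving datanode.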
2024-12-09T01:51:01,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/349a0c6f7d8649f7b68fd79a5d78bc21 2024-12-09T01:51:01,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/349a0c6f7d8649f7b68fd79a5d78bc21 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/349a0c6f7d8649f7b68fd79a5d78bc21 2024-12-09T01:51:01,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/349a0c6f7d8649f7b68fd79a5d78bc21, entries=5, sequenceid=11, filesize=10.1 K 2024-12-09T01:51:01,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 813a51a97e7f9ea6d5c277ef30637811 in 456ms, sequenceid=11, compaction requested=false 2024-12-09T01:51:01,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 813a51a97e7f9ea6d5c277ef30637811: 2024-12-09T01:51:01,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45353 {}] regionserver.HRegion(8855): Flush requested on 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:51:01,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 813a51a97e7f9ea6d5c277ef30637811 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-12-09T01:51:01,575 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/5fd91dca4ec648da8c4f3f4807016617 is 1080, key is row0007/info:/1733709060941/Put/seqid=0 2024-12-09T01:51:01,577 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T01:51:01,577 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:51:01,577 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741852_1035 2024-12-09T01:51:01,578 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] 2024-12-09T01:51:01,579 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:01,579 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:01,579 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741853_1036 2024-12-09T01:51:01,580 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:01,582 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34670 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741854_1037] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741854_1037 to mirror 127.0.0.1:33411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:01,582 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:01,582 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34670 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741854_1037] {}] datanode.BlockReceiver(316): Block 1073741854 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:01,582 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:01,582 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741854_1037 2024-12-09T01:51:01,582 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34670 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741854_1037] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34670 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:01,583 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:01,584 WARN [Thread-935 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:01,584 WARN [Thread-935 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 
2024-12-09T01:51:01,584 WARN [Thread-935 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741855_1038 2024-12-09T01:51:01,584 WARN [Thread-935 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:01,585 WARN [IPC Server handler 4 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:01,585 WARN [IPC Server handler 4 on default port 42375 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:01,585 WARN [IPC Server handler 4 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:01,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741856_1039 (size=12506) 2024-12-09T01:51:01,589 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/5fd91dca4ec648da8c4f3f4807016617 2024-12-09T01:51:01,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/5fd91dca4ec648da8c4f3f4807016617 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/5fd91dca4ec648da8c4f3f4807016617 2024-12-09T01:51:01,602 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/5fd91dca4ec648da8c4f3f4807016617, entries=7, sequenceid=24, filesize=12.2 K 2024-12-09T01:51:01,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 813a51a97e7f9ea6d5c277ef30637811 in 34ms, sequenceid=24, compaction requested=false 2024-12-09T01:51:01,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
813a51a97e7f9ea6d5c277ef30637811: 2024-12-09T01:51:01,604 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-12-09T01:51:01,604 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:01,604 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/5fd91dca4ec648da8c4f3f4807016617 because midkey is the same as first or last row 2024-12-09T01:51:02,035 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:02,951 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:02,952 WARN [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]] 2024-12-09T01:51:02,952 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C45353%2C1733709043006:(num 1733709060931) roll requested 2024-12-09T01:51:02,952 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C45353%2C1733709043006.1733709062952 2024-12-09T01:51:02,955 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:02,955 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:02,955 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741857_1040 2024-12-09T01:51:02,955 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:02,957 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:02,957 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:51:02,957 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741858_1041 2024-12-09T01:51:02,957 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] 2024-12-09T01:51:02,958 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:02,959 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:02,959 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741859_1042 2024-12-09T01:51:02,959 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:02,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34404 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741860_1043 to mirror 127.0.0.1:34383 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:02,962 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34383 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:02,962 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:02,962 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34404 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T01:51:02,962 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741860_1043 2024-12-09T01:51:02,962 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34404 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34404 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
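Editor's note: at 01:51:02,952 above, FSHLog detects that the WAL writer's pipeline is down to one replica ("Found 1 replicas but expecting no less than 2 replicas"), requests a roll, and the roller then creates ef6f18c58dc9%2C45353%2C1733709043006.1733709062952 and archives the older WAL files (see the entries that follow). The short Java sketch below is an added, simplified illustration of that low-replica trigger; WalRollTrigger, minReplicas and checkPipeline are hypothetical names, and this is not the actual FSHLog/AbstractWALRoller code.

```java
import java.util.List;

/**
 * Hypothetical sketch of the low-replica WAL roll trigger seen in the log:
 * when the number of live replicas behind the current WAL writer drops below
 * a configured minimum, request a log roll so a fresh file (and a fresh
 * pipeline) can be created. Not the real FSHLog implementation.
 */
public class WalRollTrigger {

    private final int minReplicas;   // e.g. 2, as in the log message
    private boolean rollRequested;

    WalRollTrigger(int minReplicas) {
        this.minReplicas = minReplicas;
    }

    /** Called after a sync, with the datanodes currently in the writer's pipeline. */
    void checkPipeline(List<String> currentPipeline) {
        if (rollRequested) {
            return; // a roll is already pending
        }
        if (currentPipeline.size() < minReplicas) {
            System.out.printf(
                "WARN HDFS pipeline error detected. Found %d replicas but expecting no less than %d replicas. "
                    + "Requesting close of WAL. current pipeline: %s%n",
                currentPipeline.size(), minReplicas, currentPipeline);
            rollRequested = true;
        }
    }

    boolean isRollRequested() {
        return rollRequested;
    }

    public static void main(String[] args) {
        WalRollTrigger trigger = new WalRollTrigger(2);
        // After several datanodes are stopped, only one replica remains in the pipeline.
        trigger.checkPipeline(List.of("127.0.0.1:35213"));
        System.out.println("roll requested = " + trigger.isRollRequested());
    }
}
```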
2024-12-09T01:51:02,963 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:02,963 WARN [IPC Server handler 2 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:02,964 WARN [IPC Server handler 2 on default port 42375 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:02,964 WARN [IPC Server handler 2 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:02,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:02,967 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:02,967 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:02,967 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:02,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:02,967 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709060931 with entries=17, filesize=17.07 KB; new WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709062952 2024-12-09T01:51:02,968 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42515:42515)] 2024-12-09T01:51:02,968 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 is not closed yet, will try archiving it next time 2024-12-09T01:51:02,968 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709060931 is not closed yet, will try archiving it next time 2024-12-09T01:51:02,969 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709054874 to 
hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/oldWALs/ef6f18c58dc9%2C45353%2C1733709043006.1733709054874 2024-12-09T01:51:02,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741846_1029 (size=17486) 2024-12-09T01:51:02,970 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709058906 to hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/oldWALs/ef6f18c58dc9%2C45353%2C1733709043006.1733709058906 2024-12-09T01:51:02,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45353 {}] regionserver.HRegion(8855): Flush requested on 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:51:02,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 813a51a97e7f9ea6d5c277ef30637811 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T01:51:02,995 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/b39e18d53836413482aedbaeca1ced68 is 1079, key is tmprow/info:/1733709062989/Put/seqid=0 2024-12-09T01:51:02,998 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34383 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:02,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34426 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741862_1045 to mirror 127.0.0.1:34383 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:02,998 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:02,998 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741862_1045 2024-12-09T01:51:02,998 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34426 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:02,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34426 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34426 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:02,998 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:03,000 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46423 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:03,000 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34432 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741863_1046] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741863_1046 to mirror 127.0.0.1:46423 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:03,000 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:51:03,001 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741863_1046 2024-12-09T01:51:03,001 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34432 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741863_1046] {}] datanode.BlockReceiver(316): Block 1073741863 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
2024-12-09T01:51:03,001 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34432 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741863_1046] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34432 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:03,001 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] 2024-12-09T01:51:03,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34446 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741864_1047] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741864_1047 to mirror 127.0.0.1:33411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:03,003 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:03,003 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:03,003 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34446 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741864_1047] {}] datanode.BlockReceiver(316): Block 1073741864 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:03,003 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741864_1047 2024-12-09T01:51:03,003 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34446 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741864_1047] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34446 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:03,004 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:03,005 WARN [Thread-945 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:03,005 WARN [Thread-945 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:03,005 WARN [Thread-945 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741865_1048 2024-12-09T01:51:03,005 WARN [Thread-945 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:03,006 WARN [IPC Server handler 0 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:03,006 WARN [IPC Server handler 0 on default port 42375 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:03,006 WARN [IPC Server handler 0 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:03,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741866_1049 (size=6027) 2024-12-09T01:51:03,095 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:03,370 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 is not closed yet, will try archiving it next time 2024-12-09T01:51:03,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/b39e18d53836413482aedbaeca1ced68 2024-12-09T01:51:03,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/b39e18d53836413482aedbaeca1ced68 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/b39e18d53836413482aedbaeca1ced68 2024-12-09T01:51:03,423 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/b39e18d53836413482aedbaeca1ced68, entries=1, sequenceid=34, filesize=5.9 K 2024-12-09T01:51:03,425 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 813a51a97e7f9ea6d5c277ef30637811 in 434ms, sequenceid=34, compaction requested=true 2024-12-09T01:51:03,425 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 813a51a97e7f9ea6d5c277ef30637811: 2024-12-09T01:51:03,425 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-12-09T01:51:03,425 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:03,425 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/5fd91dca4ec648da8c4f3f4807016617 because midkey is the same as first or last row 2024-12-09T01:51:03,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 813a51a97e7f9ea6d5c277ef30637811:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:51:03,425 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
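[Editor's note] The "Failed to place enough replicas" warnings above already name the next diagnostic step: enable DEBUG logging for BlockPlacementPolicy and NetworkTopology. A minimal sketch of doing that programmatically with the Log4j2 API that is already on this test's classpath (equivalent logger entries in the log4j2 properties file work just as well); the class name below is hypothetical and not part of the test:

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class BlockPlacementDebugSketch {
    public static void main(String[] args) {
        // Raise the two loggers named in the NameNode warning to DEBUG so the
        // reasons behind "Failed to place enough replicas" are actually printed.
        Configurator.setLevel(
            "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
    }
}
```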
2024-12-09T01:51:03,425 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:51:03,427 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:51:03,427 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HStore(1541): 813a51a97e7f9ea6d5c277ef30637811/info is initiating minor compaction (all files) 2024-12-09T01:51:03,427 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 813a51a97e7f9ea6d5c277ef30637811/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:51:03,427 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/349a0c6f7d8649f7b68fd79a5d78bc21, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/5fd91dca4ec648da8c4f3f4807016617, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/b39e18d53836413482aedbaeca1ced68] into tmpdir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp, totalSize=28.2 K 2024-12-09T01:51:03,428 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] compactions.Compactor(225): Compacting 349a0c6f7d8649f7b68fd79a5d78bc21, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733709056914 2024-12-09T01:51:03,428 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5fd91dca4ec648da8c4f3f4807016617, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1733709060941 2024-12-09T01:51:03,429 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] compactions.Compactor(225): Compacting b39e18d53836413482aedbaeca1ced68, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733709062989 2024-12-09T01:51:03,445 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 813a51a97e7f9ea6d5c277ef30637811#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:51:03,446 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/0bf686ba63c24831b2b092af1ec82db0 is 1080, key is row0002/info:/1733709056914/Put/seqid=0 2024-12-09T01:51:03,447 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:03,448 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:51:03,448 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741867_1050 2024-12-09T01:51:03,448 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] 2024-12-09T01:51:03,450 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T01:51:03,450 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:03,450 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741868_1051 2024-12-09T01:51:03,451 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:03,452 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:03,452 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:03,452 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741869_1052 2024-12-09T01:51:03,453 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:03,455 WARN [Thread-954 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T01:51:03,455 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34460 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741870_1053] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741870_1053 to mirror 127.0.0.1:33411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:03,455 WARN [Thread-954 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:03,455 WARN [Thread-954 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741870_1053 2024-12-09T01:51:03,456 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34460 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741870_1053] {}] datanode.BlockReceiver(316): Block 1073741870 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:03,456 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34460 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741870_1053] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34460 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:03,456 WARN [Thread-954 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:03,457 WARN [IPC Server handler 4 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:03,457 WARN [IPC Server handler 4 on default port 42375 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:03,457 WARN [IPC Server handler 4 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:03,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741871_1054 (size=17994) 2024-12-09T01:51:03,560 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@b397d24[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101):Failed to transfer BP-1767086187-172.17.0.2-1733709042101:blk_1073741841_1024 to 127.0.0.1:36535 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:03,560 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5fe9233[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101):Failed to transfer BP-1767086187-172.17.0.2-1733709042101:blk_1073741851_1034 to 127.0.0.1:33411 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:03,869 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/0bf686ba63c24831b2b092af1ec82db0 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0 2024-12-09T01:51:03,876 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 813a51a97e7f9ea6d5c277ef30637811/info of 813a51a97e7f9ea6d5c277ef30637811 into 0bf686ba63c24831b2b092af1ec82db0(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T01:51:03,876 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 813a51a97e7f9ea6d5c277ef30637811: 2024-12-09T01:51:03,876 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811., storeName=813a51a97e7f9ea6d5c277ef30637811/info, priority=13, startTime=1733709063425; duration=0sec 2024-12-09T01:51:03,876 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T01:51:03,876 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:03,876 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0 because midkey is the same as first or last row 2024-12-09T01:51:03,877 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T01:51:03,877 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:03,877 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0 because midkey is the same as first or last row 2024-12-09T01:51:03,877 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-12-09T01:51:03,877 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:03,877 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0 because midkey is the same as first or last row 2024-12-09T01:51:03,877 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:51:03,877 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 813a51a97e7f9ea6d5c277ef30637811:info 2024-12-09T01:51:04,036 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:04,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45353 {}] regionserver.HRegion(8855): Flush requested on 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:51:04,411 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 813a51a97e7f9ea6d5c277ef30637811 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T01:51:04,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/7a4cac6f758c4dbab0ccb762e96dc76d is 1079, key is tmprow/info:/1733709064410/Put/seqid=0 2024-12-09T01:51:04,417 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:04,417 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:04,417 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741872_1055 2024-12-09T01:51:04,418 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:04,419 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:04,419 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:04,419 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741873_1056 2024-12-09T01:51:04,420 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:04,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34474 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741874_1057 to mirror 127.0.0.1:36535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:04,422 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:04,422 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34474 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:04,422 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:04,422 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741874_1057 2024-12-09T01:51:04,422 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34474 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34474 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:04,422 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:04,424 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:46423 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:04,424 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34490 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741875_1058] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741875_1058 to mirror 127.0.0.1:46423 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:04,425 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]) is bad. 2024-12-09T01:51:04,425 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741875_1058 2024-12-09T01:51:04,425 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34490 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741875_1058] {}] datanode.BlockReceiver(316): Block 1073741875 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:04,425 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:34490 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741875_1058] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34490 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:04,425 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:46423,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK] 2024-12-09T01:51:04,426 WARN [IPC Server handler 3 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-09T01:51:04,426 WARN [IPC Server handler 3 on default port 42375 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-09T01:51:04,426 WARN [IPC Server handler 3 on default port 42375 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-09T01:51:04,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741876_1059 (size=6027) 2024-12-09T01:51:04,559 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@b397d24[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101):Failed to transfer BP-1767086187-172.17.0.2-1733709042101:blk_1073741846_1029 to 127.0.0.1:33411 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:04,559 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5fe9233[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101):Failed to transfer BP-1767086187-172.17.0.2-1733709042101:blk_1073741856_1039 to 127.0.0.1:46423 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
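[Editor's note] The recurring "Error Recovery ... is bad", "Abandoning blk_...", and "Excluding datanode" entries are the HDFS client's DataStreamer walking through pipeline recovery after each refused connection. The sketch below only names the stock HDFS client keys that govern that behaviour; the values and the localhost:42375 endpoint are illustrative, not the test's confirmed settings:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineFailureTuningSketch {
    public static void main(String[] args) throws Exception {
        // Standard client keys controlling whether DFSClient tries to swap a replacement
        // datanode into the write pipeline when one member is marked bad. In a mini-cluster
        // where most datanodes are down, replacement cannot succeed, which is why the log
        // keeps abandoning blocks and excluding nodes instead.
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42375"), conf)) {
            System.out.println("default replication = " + fs.getDefaultReplication(new Path("/")));
        }
    }
}
```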
2024-12-09T01:51:04,830 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/7a4cac6f758c4dbab0ccb762e96dc76d 2024-12-09T01:51:04,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/7a4cac6f758c4dbab0ccb762e96dc76d as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/7a4cac6f758c4dbab0ccb762e96dc76d 2024-12-09T01:51:04,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/7a4cac6f758c4dbab0ccb762e96dc76d, entries=1, sequenceid=45, filesize=5.9 K 2024-12-09T01:51:04,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 813a51a97e7f9ea6d5c277ef30637811 in 436ms, sequenceid=45, compaction requested=false 2024-12-09T01:51:04,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 813a51a97e7f9ea6d5c277ef30637811: 2024-12-09T01:51:04,847 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-12-09T01:51:04,847 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:04,847 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0 because midkey is the same as first or last row 2024-12-09T01:51:04,969 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:04,969 WARN [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-12-09T01:51:05,026 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:05,029 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:05,030 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:05,030 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:05,030 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:51:05,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2234d719{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:05,032 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66e63d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:05,096 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:05,147 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3aa56449{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/java.io.tmpdir/jetty-localhost-42421-hadoop-hdfs-3_4_1-tests_jar-_-any-3595025657715471143/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:05,148 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@23df32cf{HTTP/1.1, (http/1.1)}{localhost:42421} 2024-12-09T01:51:05,148 INFO [Time-limited test {}] server.Server(415): Started @127885ms 2024-12-09T01:51:05,149 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:05,248 WARN [Thread-983 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:51:05,251 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6ba5b081adad07e with lease ID 0x3209ef898166a614: from storage DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1 node DatanodeRegistration(127.0.0.1:38775, datanodeUuid=63d05511-8676-42d8-83ea-542fafbdd738, infoPort=34687, infoSecurePort=0, ipcPort=43513, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T01:51:05,251 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6ba5b081adad07e with lease ID 0x3209ef898166a614: from storage DS-f3231ded-9564-4cf1-871c-41faa46f29da node DatanodeRegistration(127.0.0.1:38775, datanodeUuid=63d05511-8676-42d8-83ea-542fafbdd738, infoPort=34687, infoSecurePort=0, ipcPort=43513, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:06,036 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:06,559 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@b397d24[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101):Failed to transfer BP-1767086187-172.17.0.2-1733709042101:blk_1073741871_1054 to 127.0.0.1:36535 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
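[Editor's note] The earlier "Unable to initialize FileSignerSecretProvider" warning is benign here (the filter falls back to random secrets because /home/jenkins/hadoop-http-auth-signature-secret is missing). For completeness, a hypothetical way to silence it is to point the standard signature-secret key at any readable file; the path, contents, and class below are made up for illustration only:

```java
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecretSketch {
    public static void main(String[] args) throws Exception {
        // Write any non-empty secret file and point the standard key at it
        // before the embedded HTTP servers start.
        Path secret = Files.createTempFile("hadoop-http-auth-signature-secret", ".txt");
        Files.writeString(secret, "dummy-secret-for-local-tests");
        Configuration conf = new Configuration();
        conf.set("hadoop.http.authentication.signature.secret.file", secret.toString());
        System.out.println("signature secret file set to " + secret);
    }
}
```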
2024-12-09T01:51:06,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741866_1049 (size=6027) 2024-12-09T01:51:06,969 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:07,096 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:07,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741876_1059 (size=6027) 2024-12-09T01:51:08,037 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:08,969 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:09,096 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:10,037 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:10,970 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:11,097 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
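The identical "java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,...]] are bad. Aborting..." trace repeating every one to two seconds is thrown by DataStreamer.handleBadDatanode once every node still in the write pipeline has been marked bad and no usable node remains, so the WAL rollers keep logging the same dead pipeline until they give up on those writers. How aggressively the HDFS client swaps replacement DataNodes into a failing pipeline is governed by the dfs.client.block.write.replace-datanode-on-failure.* client settings; a sketch of setting them on a client Configuration (the values shown are illustrative, not the values this test ran with):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class PipelineRecoveryConfigSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Feature switch for replacing failed pipeline nodes at all.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // NEVER / DEFAULT / ALWAYS: when to ask the NameNode for a replacement
        // DataNode after a pipeline node fails. Small clusters often have no spare
        // node to offer, which is why DEFAULT only replaces under certain conditions.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // With best-effort the write continues on the surviving nodes when no
        // replacement can be found, instead of failing immediately.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        try (FileSystem fs = FileSystem.get(conf)) {
          System.out.println("client filesystem: " + fs.getUri());
        }
      }
    }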
2024-12-09T01:51:12,038 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:12,922 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T01:51:12,970 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:13,097 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:13,212 ERROR [FSHLog-0-hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData-prefix:ef6f18c58dc9,46389,1733709042944 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:13,212 WARN [FSHLog-0-hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData-prefix:ef6f18c58dc9,46389,1733709042944 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:13,212 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C46389%2C1733709042944:(num 1733709043119) roll requested 2024-12-09T01:51:13,213 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C46389%2C1733709042944.1733709073212 2024-12-09T01:51:13,216 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:13,216 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:13,216 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741877_1060 2024-12-09T01:51:13,217 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:13,218 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:13,219 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:13,219 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741878_1061 2024-12-09T01:51:13,219 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:13,220 WARN [Thread-1003 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:13,221 WARN [Thread-1003 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 
2024-12-09T01:51:13,221 WARN [Thread-1003 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741879_1062 2024-12-09T01:51:13,221 WARN [Thread-1003 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:13,226 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:13,226 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:13,226 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:13,226 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:13,227 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:13,227 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 with entries=54, filesize=26.65 KB; new WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709073212 2024-12-09T01:51:13,227 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:13,227 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
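The "Failed to write trailer, non-fatal, continuing..." and "close old writer failed" entries above show the roller treating failures on the dead writer as best-effort: the replacement WAL is already in service, so an IOException while closing the old one is logged and swallowed, and the unclosed file is handed to lease recovery instead (see the RecoverLeaseFSUtils entries that follow). A generic sketch of that best-effort close pattern, illustrating the idea rather than HBase's actual implementation:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.logging.Logger;

    public final class BestEffortClose {
      private static final Logger LOG = Logger.getLogger(BestEffortClose.class.getName());

      // Close a resource whose underlying stream may already be dead (for example a
      // writer whose HDFS pipeline has failed). The caller has already switched to a
      // replacement, so a failure here is logged rather than propagated.
      static void closeOldWriterQuietly(Closeable oldWriter) {
        try {
          oldWriter.close();
        } catch (IOException e) {
          LOG.warning("close old writer failed, continuing with the new writer: " + e);
        }
      }

      public static void main(String[] args) {
        closeOldWriterQuietly(() -> { throw new IOException("All datanodes are bad. Aborting..."); });
      }
    }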
2024-12-09T01:51:13,227 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 2024-12-09T01:51:13,228 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34687:34687),(127.0.0.1/127.0.0.1:42515:42515)] 2024-12-09T01:51:13,228 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 is not closed yet, will try archiving it next time 2024-12-09T01:51:13,228 WARN [IPC Server handler 3 on default port 42375 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 has not been closed. Lease recovery is in progress. RecoveryId = 1064 for block blk_1073741830_1006 2024-12-09T01:51:13,228 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 after 1ms 2024-12-09T01:51:14,038 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:14,971 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:16,038 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:16,971 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:17,230 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/WALs/ef6f18c58dc9,46389,1733709042944/ef6f18c58dc9%2C46389%2C1733709042944.1733709043119 after 4003ms 2024-12-09T01:51:18,039 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:18,971 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,039 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,401 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C45353%2C1733709043006.1733709080401 2024-12-09T01:51:20,406 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-925780710_22 at /127.0.0.1:37650 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741881_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4]'}, localName='127.0.0.1:38775', datanodeUuid='63d05511-8676-42d8-83ea-542fafbdd738', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741881_1065 to mirror 127.0.0.1:36535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:20,407 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-925780710_22 at /127.0.0.1:37650 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741881_1065] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T01:51:20,407 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-925780710_22 at /127.0.0.1:37650 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741881_1065] {}] datanode.DataXceiver(331): 127.0.0.1:38775:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37650 dst: /127.0.0.1:38775 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:20,406 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,409 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741881_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:20,409 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741881_1065 2024-12-09T01:51:20,410 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:20,411 WARN [Thread-1012 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
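Because the old master-store WAL could not be closed over its dead pipeline, the roller falls back to asking the NameNode to recover the file's lease; the NameNode answers that "Lease recovery is in progress" (RecoveryId = 1064) and the client polls, as the "Failed to recover lease, attempt=0 ... after 1ms" and "attempt=1 ... after 4003ms" entries above show. A minimal sketch of that poll-and-backoff pattern against the public DistributedFileSystem.recoverLease API (the path, retry count and sleep times are illustrative, not taken from RecoverLeaseFSUtils):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverLeaseSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        if (!(fs instanceof DistributedFileSystem)) {
          throw new IllegalStateException("lease recovery only applies to HDFS");
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        Path abandonedWal = new Path(args[0]); // e.g. a WAL file left unclosed as above
        // recoverLease returns true once the file has been closed and its last block
        // finalized; while recovery is still in progress it returns false, so poll
        // with a backoff, much like the attempt=0 / attempt=1 entries above.
        boolean recovered = false;
        for (int attempt = 0; attempt < 10 && !recovered; attempt++) {
          recovered = dfs.recoverLease(abandonedWal);
          if (!recovered) {
            Thread.sleep(Math.min(4000L, 1000L * (attempt + 1)));
          }
        }
        System.out.println("recovered=" + recovered);
      }
    }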
2024-12-09T01:51:20,412 WARN [Thread-1012 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741882_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:20,412 WARN [Thread-1012 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741882_1066 2024-12-09T01:51:20,413 WARN [Thread-1012 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:20,424 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:20,424 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:20,425 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:20,425 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:20,425 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:20,425 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709062952 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709080401 2024-12-09T01:51:20,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741861_1044 (size=13591) 2024-12-09T01:51:20,428 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 is not closed yet, will try archiving it next time 2024-12-09T01:51:20,428 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709060931 to hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/oldWALs/ef6f18c58dc9%2C45353%2C1733709043006.1733709060931 2024-12-09T01:51:20,440 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34687:34687),(127.0.0.1/127.0.0.1:42515:42515)] 2024-12-09T01:51:20,440 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 is not closed yet, will try archiving it next time 2024-12-09T01:51:20,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45353 {}] regionserver.HRegion(8855): Flush requested on 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:51:20,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 813a51a97e7f9ea6d5c277ef30637811 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T01:51:20,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/36d33a4f79a64f2ca405263b903ceeb0 is 1080, key is row0013/info:/1733709080442/Put/seqid=0 2024-12-09T01:51:20,479 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:46582 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741884_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741884_1068 to mirror 127.0.0.1:36535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:20,479 WARN [Thread-1019 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,479 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:46582 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741884_1068] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 
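The MemStoreFlusher entries around this point show a flush of 813a51a97e7f9ea6d5c277ef30637811 writing the memstore out as a new HFile under the region's .tmp/info/ directory before it is committed into the live info/ store by rename (the "Committing ... .tmp/info/36d33a4f... as ... info/36d33a4f..." entry further down); writing to a temporary location first keeps half-written files out of the store that readers see. A generic sketch of that write-to-.tmp-then-rename pattern with the plain FileSystem API (paths and contents are made up; this shows the pattern, not HBase's flush code):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpThenRenameSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path tmp = new Path("/region/.tmp/info/part-0001");   // hypothetical paths
        Path committed = new Path("/region/info/part-0001");
        fs.mkdirs(committed.getParent());
        // Write the whole file under .tmp first; readers never look there.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("flushed cells".getBytes(StandardCharsets.UTF_8));
        }
        // HDFS rename is atomic for a single file, so the store either sees the
        // complete file or does not see it at all.
        if (!fs.rename(tmp, committed)) {
          throw new java.io.IOException("commit failed for " + committed);
        }
      }
    }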
2024-12-09T01:51:20,479 WARN [Thread-1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741884_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:20,479 WARN [Thread-1019 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741884_1068 2024-12-09T01:51:20,479 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:46582 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741884_1068] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46582 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:20,480 WARN [Thread-1019 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:20,492 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37666 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741885_1069] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4]'}, localName='127.0.0.1:38775', datanodeUuid='63d05511-8676-42d8-83ea-542fafbdd738', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741885_1069 to mirror 127.0.0.1:33411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:20,492 WARN [Thread-1019 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1069 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,493 WARN [Thread-1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741885_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:20,493 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37666 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741885_1069] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:20,493 WARN [Thread-1019 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741885_1069 2024-12-09T01:51:20,493 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37666 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741885_1069] {}] datanode.DataXceiver(331): 127.0.0.1:38775:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37666 dst: /127.0.0.1:38775 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:20,494 WARN [Thread-1019 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:20,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741886_1070 (size=11421) 2024-12-09T01:51:20,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741886_1070 (size=11421) 2024-12-09T01:51:20,509 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/36d33a4f79a64f2ca405263b903ceeb0 2024-12-09T01:51:20,521 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/36d33a4f79a64f2ca405263b903ceeb0 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/36d33a4f79a64f2ca405263b903ceeb0 2024-12-09T01:51:20,529 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/36d33a4f79a64f2ca405263b903ceeb0, entries=6, sequenceid=55, filesize=11.2 K 2024-12-09T01:51:20,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=6.30 KB/6455 for 813a51a97e7f9ea6d5c277ef30637811 in 72ms, sequenceid=55, compaction requested=true 2024-12-09T01:51:20,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 813a51a97e7f9ea6d5c277ef30637811: 2024-12-09T01:51:20,531 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.6 K, sizeToCheck=16.0 K 2024-12-09T01:51:20,531 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:20,531 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0 because midkey is the same as first or last row 2024-12-09T01:51:20,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 813a51a97e7f9ea6d5c277ef30637811:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:51:20,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:51:20,531 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:51:20,533 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35442 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:51:20,533 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HStore(1541): 813a51a97e7f9ea6d5c277ef30637811/info is initiating minor compaction (all files) 2024-12-09T01:51:20,533 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 813a51a97e7f9ea6d5c277ef30637811/info in TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:51:20,534 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/7a4cac6f758c4dbab0ccb762e96dc76d, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/36d33a4f79a64f2ca405263b903ceeb0] into tmpdir=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp, totalSize=34.6 K 2024-12-09T01:51:20,534 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0bf686ba63c24831b2b092af1ec82db0, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1733709056914 2024-12-09T01:51:20,535 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7a4cac6f758c4dbab0ccb762e96dc76d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1733709064410 2024-12-09T01:51:20,535 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] compactions.Compactor(225): Compacting 36d33a4f79a64f2ca405263b903ceeb0, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1733709064816 2024-12-09T01:51:20,578 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 813a51a97e7f9ea6d5c277ef30637811#info#compaction#24 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:51:20,578 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/42ce9e98b87649a086620ab6c1683829 is 1080, key is row0002/info:/1733709056914/Put/seqid=0 2024-12-09T01:51:20,583 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1071 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,583 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741887_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:20,583 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741887_1071 2024-12-09T01:51:20,584 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:20,589 WARN [Thread-1030 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:46630 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741888_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741888_1072 to mirror 127.0.0.1:33411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:20,589 WARN [Thread-1030 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741888_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:20,589 WARN [Thread-1030 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741888_1072 2024-12-09T01:51:20,589 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:46630 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741888_1072] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:20,589 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:46630 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741888_1072] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46630 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
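The DataStreamer and DataXceiver entries above show the HDFS client failing to open a write pipeline (Connection refused to one datanode), abandoning the block, marking the unreachable node as bad, excluding it, and retrying with the remaining nodes. The sketch below is a toy, self-contained model of that exclude-and-retry loop only; it is not the real org.apache.hadoop.hdfs.DataStreamer, and the PipelineWriter interface, BadNodeException, and node-blaming logic are illustrative assumptions.

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Toy model of the exclude-and-retry behaviour logged above; NOT the real HDFS client.
public class PipelineRetryModel {

  // Thrown by the (hypothetical) writer when one node in the pipeline cannot be reached.
  static class BadNodeException extends Exception {
    final String badNode;
    BadNodeException(String badNode) { this.badNode = badNode; }
  }

  interface PipelineWriter {
    void open(List<String> pipeline) throws BadNodeException;
  }

  static List<String> openWithExclusion(List<String> candidates, int replication,
                                        PipelineWriter writer, int maxAttempts) throws Exception {
    Set<String> excluded = new LinkedHashSet<>();        // "Excluding datanode ..." in the log
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      List<String> pipeline = new ArrayList<>();
      for (String dn : candidates) {
        if (!excluded.contains(dn) && pipeline.size() < replication) {
          pipeline.add(dn);
        }
      }
      if (pipeline.size() < replication) {
        break;                                           // not enough healthy nodes left to try
      }
      try {
        writer.open(pipeline);                           // analogous to createBlockOutputStream(...)
        return pipeline;                                 // pipeline established
      } catch (BadNodeException e) {
        excluded.add(e.badNode);                         // "datanode N (...) is bad" -> abandon block, retry
      }
    }
    throw new Exception("could not build a pipeline; excluded=" + excluded);
  }
}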
2024-12-09T01:51:20,590 WARN [Thread-1030 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:20,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741889_1073 (size=23502) 2024-12-09T01:51:20,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741889_1073 (size=23502) 2024-12-09T01:51:20,612 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/42ce9e98b87649a086620ab6c1683829 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/42ce9e98b87649a086620ab6c1683829 2024-12-09T01:51:20,623 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 813a51a97e7f9ea6d5c277ef30637811/info of 813a51a97e7f9ea6d5c277ef30637811 into 42ce9e98b87649a086620ab6c1683829(size=23.0 K), total size for store is 23.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 813a51a97e7f9ea6d5c277ef30637811: 2024-12-09T01:51:20,623 INFO [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811., storeName=813a51a97e7f9ea6d5c277ef30637811/info, priority=13, startTime=1733709080531; duration=0sec 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/42ce9e98b87649a086620ab6c1683829 because midkey is the same as first or last row 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/42ce9e98b87649a086620ab6c1683829 because midkey is the same as first or last row 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] 
regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:20,623 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/42ce9e98b87649a086620ab6c1683829 because midkey is the same as first or last row 2024-12-09T01:51:20,624 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:51:20,624 DEBUG [RS:0;ef6f18c58dc9:45353-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 813a51a97e7f9ea6d5c277ef30637811:info 2024-12-09T01:51:20,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45353 {}] regionserver.HRegion(8855): Flush requested on 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:51:20,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 813a51a97e7f9ea6d5c277ef30637811 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-12-09T01:51:20,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/7b5c10b1fc924325aafd6ea9bf8f0947 is 1080, key is row0018/info:/1733709080459/Put/seqid=0 2024-12-09T01:51:20,693 WARN [Thread-1038 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,693 WARN [Thread-1038 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 
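The repeated ConstantSizeRegionSplitPolicy / IncreasingToUpperBoundRegionSplitPolicy / StoreUtils entries a few lines above record the post-compaction split check: the store size (sumSize=23.0 K) exceeds the check size (16.0 K), but the split is still rejected because the candidate midkey equals the first or last row. The snippet below is a schematic of just that two-part decision; real HBase policies also consider region count, max file size, and explicit split points.

// Schematic of the split decision logged above; not the actual HBase split policy classes.
public class SplitCheckModel {

  static boolean shouldSplit(long largestStoreBytes, long sizeToCheckBytes, boolean midkeyUsable) {
    // "Should split because region size is big enough sumSize=23.0 K, sizeToCheck=16.0 K"
    boolean bigEnough = largestStoreBytes > sizeToCheckBytes;
    // "cannot split ... because midkey is the same as first or last row"
    return bigEnough && midkeyUsable;
  }

  public static void main(String[] args) {
    long sumSize = 23 * 1024;       // ~23.0 K store after the compaction above
    long sizeToCheck = 16 * 1024;   // ~16.0 K threshold from the log
    System.out.println(shouldSplit(sumSize, sizeToCheck, false)); // false: size qualifies, but no usable midkey
    System.out.println(shouldSplit(sumSize, sizeToCheck, true));  // true: a usable midkey would allow the split
  }
}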
2024-12-09T01:51:20,694 WARN [Thread-1038 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741890_1074 2024-12-09T01:51:20,694 WARN [Thread-1038 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:20,696 WARN [Thread-1038 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,696 WARN [Thread-1038 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK], DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 
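Earlier in this run the ExploringCompactionPolicy reported selecting 3 files of size 35442 "with 1 in ratio". The sketch below illustrates the ratio test that phrase refers to: a candidate set is "in ratio" when no single file is larger than the configured ratio times the combined size of the other selected files (the ratio is commonly 1.2 by default, via hbase.hstore.compaction.ratio). This is a simplified stand-in for the real policy, and the file sizes are rounded from the log, not exact.

import java.util.List;

// Simplified "in ratio" check, modelled on the exploring compaction selection above.
public class CompactionSelectionModel {

  static boolean inRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long s : sizes) {
      if (s > (total - s) * ratio) {
        return false;              // one file dominates the selection; not worth compacting together
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three store files from the log: 17.6 K, 5.9 K, 11.2 K.
    System.out.println(inRatio(List.of(18_022L, 6_042L, 11_469L), 1.2)); // true -> an eligible selection
  }
}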
2024-12-09T01:51:20,696 WARN [Thread-1038 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741891_1075 2024-12-09T01:51:20,697 WARN [Thread-1038 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:20,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741892_1076 (size=11421) 2024-12-09T01:51:20,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741892_1076 (size=11421) 2024-12-09T01:51:20,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/7b5c10b1fc924325aafd6ea9bf8f0947 2024-12-09T01:51:20,718 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/7b5c10b1fc924325aafd6ea9bf8f0947 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/7b5c10b1fc924325aafd6ea9bf8f0947 2024-12-09T01:51:20,725 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/7b5c10b1fc924325aafd6ea9bf8f0947, entries=6, sequenceid=66, filesize=11.2 K 2024-12-09T01:51:20,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7530, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 813a51a97e7f9ea6d5c277ef30637811 in 43ms, sequenceid=66, compaction requested=false 2024-12-09T01:51:20,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 813a51a97e7f9ea6d5c277ef30637811: 2024-12-09T01:51:20,727 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=34.1 K, sizeToCheck=16.0 K 2024-12-09T01:51:20,727 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:51:20,727 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/42ce9e98b87649a086620ab6c1683829 because midkey is the same as first or last row 2024-12-09T01:51:20,973 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:20,973 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-12-09T01:51:21,089 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T01:51:21,089 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T01:51:21,089 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:21,089 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:21,090 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:21,090 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T01:51:21,090 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1812748992, stopped=false 2024-12-09T01:51:21,090 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef6f18c58dc9,46389,1733709042944 2024-12-09T01:51:21,090 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T01:51:21,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:21,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:21,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x10074784dd30002, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:21,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x10074784dd30002, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:21,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:21,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:21,093 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:51:21,093 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
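The call stack above shows where the shutdown originates: the JUnit @After hook (AbstractTestLogRolling.tearDown) calls HBaseTestingUtil.shutdownMiniCluster(), which closes the shared AsyncConnection and then stops the master and region servers. A minimal sketch of that setup/teardown pattern is below; the class name and the startMiniCluster() call are assumptions based on the conventional HBaseTestingUtil API and are not copied from this test.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

// Minimal mini-cluster lifecycle sketch (assumed API shape; hedged, not the actual test class).
public class ExampleMiniClusterTest {

  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster();      // assumed counterpart to the shutdown call seen in the stack trace
  }

  @After
  public void tearDown() throws Exception {
    util.shutdownMiniCluster();   // the call visible in the stack trace above
  }
}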
2024-12-09T01:51:21,093 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:21,093 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:21,094 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,45353,1733709043006' ***** 2024-12-09T01:51:21,094 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:51:21,094 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,36919,1733709043959' ***** 2024-12-09T01:51:21,094 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:51:21,094 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:51:21,094 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:21,094 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36919-0x10074784dd30002, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:21,095 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:21,096 INFO [RS:1;ef6f18c58dc9:36919 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:51:21,096 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:51:21,096 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:51:21,096 INFO [RS:1;ef6f18c58dc9:36919 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:51:21,097 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:51:21,097 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,36919,1733709043959 2024-12-09T01:51:21,097 INFO [RS:0;ef6f18c58dc9:45353 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:51:21,097 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:21,097 INFO [RS:0;ef6f18c58dc9:45353 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:51:21,097 INFO [RS:1;ef6f18c58dc9:36919 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;ef6f18c58dc9:36919. 
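The ZKWatcher/ZKUtil entries above record the deletion of /hbase/running and each process re-arming a watch on the now-absent znode; deletion of that znode is the cluster-wide shutdown signal that precedes the "STOPPING region server" messages. Below is a standalone sketch of the same pattern using the plain ZooKeeper client. The RunningNodeWatcher class, quorum string, and 30-second session timeout are illustrative assumptions; this is not HBase's ZKWatcher implementation.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Standalone sketch: treat deletion of a "running" znode as the shutdown signal.
public class RunningNodeWatcher implements Watcher {

  private final ZooKeeper zk;
  private final String runningZnode;          // e.g. "/hbase/running" as in the events above
  private volatile boolean clusterUp = true;

  RunningNodeWatcher(String quorum, String runningZnode) throws Exception {
    this.runningZnode = runningZnode;
    this.zk = new ZooKeeper(quorum, 30_000, this);
    // exists() sets the watch whether or not the node is currently there
    // ("Set watcher on znode that does not yet exist" in the log).
    zk.exists(runningZnode, true);
  }

  @Override
  public void process(WatchedEvent event) {
    if (event.getType() == Event.EventType.NodeDeleted && runningZnode.equals(event.getPath())) {
      clusterUp = false;                       // running znode gone -> begin shutdown
    }
    try {
      zk.exists(runningZnode, true);           // re-arm the one-shot watch
    } catch (Exception ignored) {
    }
  }

  boolean isClusterUp() { return clusterUp; }
}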
2024-12-09T01:51:21,097 DEBUG [RS:1;ef6f18c58dc9:36919 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:21,097 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(3091): Received CLOSE for 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:51:21,097 DEBUG [RS:1;ef6f18c58dc9:36919 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:21,097 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,36919,1733709043959; all regions closed. 2024-12-09T01:51:21,097 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,45353,1733709043006 2024-12-09T01:51:21,097 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:21,097 INFO [RS:0;ef6f18c58dc9:45353 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef6f18c58dc9:45353. 
2024-12-09T01:51:21,097 DEBUG [RS:0;ef6f18c58dc9:45353 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:21,098 DEBUG [RS:0;ef6f18c58dc9:45353 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:21,098 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:51:21,098 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:51:21,098 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:51:21,098 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T01:51:21,099 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 813a51a97e7f9ea6d5c277ef30637811, disabling compactions & flushes 2024-12-09T01:51:21,099 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:51:21,099 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:51:21,099 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. after waiting 0 ms 2024-12-09T01:51:21,099 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 
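The RS_CLOSE_REGION entries above walk the region-close protocol in order: disable compactions and flushes, wait for and acquire the close lock, disable updates, and only then flush whatever is still in the memstore before the stores are closed. The snippet below is a schematic model of that ordering only; the field names, the put() guard, and the flush body are invented for illustration and are not HRegion code.

import java.util.concurrent.locks.ReentrantReadWriteLock;

// Schematic model of the close ordering visible in the log entries above.
public class RegionCloseModel {

  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
  private volatile boolean writesEnabled = true;
  private long memstoreBytes = 1_075;          // ~1.05 KB still pending, as in the log

  void put(long cellBytes) {
    if (!writesEnabled) {
      throw new IllegalStateException("region is closing; updates disabled");
    }
    memstoreBytes += cellBytes;
  }

  void close() {
    writesEnabled = false;                     // "Updates disabled for region ..."
    closeLock.writeLock().lock();              // "Acquired close lock ... after waiting 0 ms"
    try {
      if (memstoreBytes > 0) {
        flush();                               // "Flushing ... 1/1 column families, dataSize=1.05 KB"
      }
      // close stores, write the region-close marker, etc.
    } finally {
      closeLock.writeLock().unlock();
    }
  }

  private void flush() { memstoreBytes = 0; }
}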
2024-12-09T01:51:21,099 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 813a51a97e7f9ea6d5c277ef30637811 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T01:51:21,102 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,102 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,104 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T01:51:21,104 DEBUG [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1325): Online Regions={813a51a97e7f9ea6d5c277ef30637811=TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T01:51:21,104 DEBUG [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 813a51a97e7f9ea6d5c277ef30637811 2024-12-09T01:51:21,104 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,104 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:51:21,104 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:51:21,104 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:51:21,104 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:51:21,104 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:51:21,104 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,104 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,105 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-12-09T01:51:21,105 ERROR [FSHLog-0-hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b-prefix:ef6f18c58dc9,45353,1733709043006.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T01:51:21,106 WARN [FSHLog-0-hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b-prefix:ef6f18c58dc9,45353,1733709043006.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,106 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C45353%2C1733709043006.meta:.meta(num 1733709043846) roll requested 2024-12-09T01:51:21,106 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709081106.meta 2024-12-09T01:51:21,111 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,111 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
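The wal.FSHLog and AbstractWALRoller entries above show the WAL append failing because every datanode in its pipeline is bad, after which a roll of the meta WAL is requested and the "LowReplication-Roller" path takes over. The class below is a minimal model of that trigger only: request a roll once the live replica count drops below what was asked for. The method names and the idea of sampling the replica count after each sync are assumptions, not the FSHLog implementation.

// Minimal model of the low-replication roll trigger described above (not FSHLog itself).
public class LowReplicationRollerModel {

  private final int expectedReplication;
  private volatile boolean rollRequested;

  LowReplicationRollerModel(int expectedReplication) {
    this.expectedReplication = expectedReplication;
  }

  // Called after each sync with the replica count reported by the WAL's output stream.
  void checkLowReplication(int currentReplicas) {
    if (currentReplicas < expectedReplication && !rollRequested) {
      rollRequested = true;    // corresponds to "roll requested" / "LowReplication-Roller was enabled"
    }
  }

  boolean isRollRequested() { return rollRequested; }
}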
2024-12-09T01:51:21,111 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 2024-12-09T01:51:21,112 WARN [IPC Server handler 4 on default port 42375 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 has not been closed. Lease recovery is in progress. RecoveryId = 1077 for block blk_1073741837_1013 2024-12-09T01:51:21,112 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 after 1ms 2024-12-09T01:51:21,113 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/0fcb4ca7cfa94aea8971730e164283d7 is 1079, key is tmprow/info:/1733709080885/Put/seqid=0 2024-12-09T01:51:21,125 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,125 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37726 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741894_1079] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4]'}, localName='127.0.0.1:38775', datanodeUuid='63d05511-8676-42d8-83ea-542fafbdd738', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741894_1079 to mirror 127.0.0.1:36535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:21,125 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:21,125 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741894_1079 2024-12-09T01:51:21,125 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37726 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741894_1079] {}] datanode.BlockReceiver(316): Block 1073741894 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T01:51:21,125 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37726 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741894_1079] {}] datanode.DataXceiver(331): 127.0.0.1:38775:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37726 dst: /127.0.0.1:38775 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
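A few entries above, RecoverLeaseFSUtils asks the NameNode to recover the lease on the dead region server's WAL, and the first attempt returns false because lease recovery is still in progress (RecoveryId = 1077). A hedged sketch of that retry loop using the public DistributedFileSystem.recoverLease(Path) call is below; the URI, file path, back-off interval, and attempt limit are placeholders, and the real RecoverLeaseFSUtils adds timeouts and progress logging on top of this.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of lease recovery with retries; paths and limits are illustrative placeholders.
public class LeaseRecoveryExample {

  public static boolean recover(String hdfsUri, String file, int maxAttempts) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(URI.create(hdfsUri), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;  // assumes an hdfs:// URI
      Path p = new Path(file);
      for (int attempt = 0; attempt < maxAttempts; attempt++) {
        // recoverLease returns true once the file is closed and its lease released.
        if (dfs.recoverLease(p)) {
          return true;
        }
        Thread.sleep(1_000L);    // back off before re-checking, as the failed attempt=0 above suggests
      }
      return false;
    }
  }
}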
2024-12-09T01:51:21,126 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:21,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741893_1078 (size=6027) 2024-12-09T01:51:21,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741893_1078 (size=6027) 2024-12-09T01:51:21,131 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=70 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/0fcb4ca7cfa94aea8971730e164283d7 2024-12-09T01:51:21,140 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/.tmp/info/0fcb4ca7cfa94aea8971730e164283d7 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0fcb4ca7cfa94aea8971730e164283d7 2024-12-09T01:51:21,142 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:46658 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741895_1080] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8]'}, localName='127.0.0.1:35213', datanodeUuid='6324855b-2229-48c2-8de4-9d5441165530', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741895_1080 to mirror 127.0.0.1:34383 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:21,143 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741895_1080 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34383 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,143 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:46658 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741895_1080] {}] datanode.BlockReceiver(316): Block 1073741895 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T01:51:21,143 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741895_1080 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK], DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:21,143 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741895_1080 2024-12-09T01:51:21,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:46658 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741895_1080] {}] datanode.DataXceiver(331): 127.0.0.1:35213:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46658 dst: /127.0.0.1:35213 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:21,144 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:21,148 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0fcb4ca7cfa94aea8971730e164283d7, entries=1, sequenceid=70, filesize=5.9 K 2024-12-09T01:51:21,149 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37738 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741896_1081] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4]'}, localName='127.0.0.1:38775', datanodeUuid='63d05511-8676-42d8-83ea-542fafbdd738', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741896_1081 to mirror 127.0.0.1:33411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:21,149 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37738 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741896_1081] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-12-09T01:51:21,149 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37738 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741896_1081] {}] datanode.DataXceiver(331): 127.0.0.1:38775:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37738 dst: /127.0.0.1:38775 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:21,149 WARN [Thread-1045 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,150 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 813a51a97e7f9ea6d5c277ef30637811 in 51ms, sequenceid=70, compaction requested=true 2024-12-09T01:51:21,150 WARN [Thread-1045 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 
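The flush entries above account the memstore in two sizes, dataSize (the cell payload, ~1.05 KB) and heapSize (payload plus per-cell overhead, ~1.36 KB), and finish with "compaction requested=true" because the store again holds enough files after the flush. The toy class below mirrors just that bookkeeping; the counter names and the trigger of 3 files (the customary hbase.hstore.compactionThreshold default) are assumptions, not HStore internals.

import java.util.concurrent.atomic.AtomicLong;

// Toy model of the flush bookkeeping in the entries above.
public class MemStoreFlushModel {

  private final AtomicLong dataSize = new AtomicLong();  // cell payload bytes ("dataSize=1.05 KB")
  private final AtomicLong heapSize = new AtomicLong();  // payload plus per-cell overhead ("heapSize=1.36 KB")
  private int storeFileCount = 2;
  private final int compactionTrigger = 3;               // assumed hbase.hstore.compactionThreshold default

  void add(long cellBytes, long overheadBytes) {
    dataSize.addAndGet(cellBytes);
    heapSize.addAndGet(cellBytes + overheadBytes);
  }

  // Flush: write a new store file, reset the counters, and decide whether to request a compaction.
  boolean flush() {
    dataSize.set(0);
    heapSize.set(0);
    storeFileCount++;
    return storeFileCount >= compactionTrigger;          // "compaction requested=true" once enough files exist
  }
}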
2024-12-09T01:51:21,150 WARN [Thread-1045 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741896_1081 2024-12-09T01:51:21,152 WARN [Thread-1045 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:21,152 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/349a0c6f7d8649f7b68fd79a5d78bc21, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/5fd91dca4ec648da8c4f3f4807016617, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/b39e18d53836413482aedbaeca1ced68, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/7a4cac6f758c4dbab0ccb762e96dc76d, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/36d33a4f79a64f2ca405263b903ceeb0] to archive 2024-12-09T01:51:21,153 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
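Note: the exclude/abandon cycle above (DataStreamer abandoning blk_1073741896_1081, then excluding the unreachable datanodes one by one) is the normal HDFS client reaction to a write pipeline losing a node. Below is a minimal, hypothetical sketch of a client write configured to tolerate exactly this situation on a small test cluster; the hdfs://localhost:42375 URI is modeled on the log, the probe path is made up, and the dfs.client.block.write.replace-datanode-on-failure.* keys are the standard DFS client settings that decide whether a writer fails or keeps going when pipeline nodes die. This is not the test's own code, just an illustration of the knobs involved.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PipelineFailureTolerantWrite {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Try to replace a failed datanode in the write pipeline...
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // ...but only when the pipeline would otherwise become too small (DEFAULT policy)...
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // ...and if no replacement can be found, keep writing with the surviving nodes
    // instead of aborting (useful on tiny test clusters like the one in this log).
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

    // Cluster URI taken from the log; the file path is illustrative.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42375"), conf);
    try (FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-demo"))) {
      out.writeBytes("probe write that should survive a datanode death\n");
      out.hflush(); // push the data through whatever pipeline is currently alive
    }
  }
}
```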
2024-12-09T01:51:21,156 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/349a0c6f7d8649f7b68fd79a5d78bc21 to hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/349a0c6f7d8649f7b68fd79a5d78bc21 2024-12-09T01:51:21,158 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/5fd91dca4ec648da8c4f3f4807016617 to hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/5fd91dca4ec648da8c4f3f4807016617 2024-12-09T01:51:21,160 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0 to hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/0bf686ba63c24831b2b092af1ec82db0 2024-12-09T01:51:21,162 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/b39e18d53836413482aedbaeca1ced68 to hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/b39e18d53836413482aedbaeca1ced68 2024-12-09T01:51:21,163 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/7a4cac6f758c4dbab0ccb762e96dc76d to hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/7a4cac6f758c4dbab0ccb762e96dc76d 2024-12-09T01:51:21,165 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/36d33a4f79a64f2ca405263b903ceeb0 
to hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/info/36d33a4f79a64f2ca405263b903ceeb0 2024-12-09T01:51:21,166 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=ef6f18c58dc9:46389 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-09T01:51:21,166 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [349a0c6f7d8649f7b68fd79a5d78bc21=10347, 5fd91dca4ec648da8c4f3f4807016617=12506, 0bf686ba63c24831b2b092af1ec82db0=17994, b39e18d53836413482aedbaeca1ced68=6027, 7a4cac6f758c4dbab0ccb762e96dc76d=6027, 36d33a4f79a64f2ca405263b903ceeb0=11421] 2024-12-09T01:51:21,169 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,169 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,170 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,170 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709081106.meta 2024-12-09T01:51:21,173 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,173 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,173 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta 2024-12-09T01:51:21,174 WARN [IPC Server handler 3 on default port 42375 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta has not been closed. Lease recovery is in progress. RecoveryId = 1083 for block blk_1073741834_1010 2024-12-09T01:51:21,174 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta after 1ms 2024-12-09T01:51:21,184 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42515:42515),(127.0.0.1/127.0.0.1:34687:34687)] 2024-12-09T01:51:21,184 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta is not closed yet, will try archiving it next time 2024-12-09T01:51:21,185 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/default/TestLogRolling-testLogRollOnDatanodeDeath/813a51a97e7f9ea6d5c277ef30637811/recovered.edits/73.seqid, newMaxSeqId=73, maxSeqId=1 2024-12-09T01:51:21,185 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:51:21,186 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 813a51a97e7f9ea6d5c277ef30637811: Waiting for close lock at 1733709081098Running coprocessor pre-close hooks at 1733709081098Disabling compacts and flushes for region at 1733709081098Disabling writes for close at 1733709081099 (+1 ms)Obtaining lock to block concurrent updates at 1733709081099Preparing flush snapshotting stores in 813a51a97e7f9ea6d5c277ef30637811 at 1733709081099Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811., syncing WAL and waiting on mvcc, flushsize=dataSize=1075, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733709081101 (+2 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 
at 1733709081102 (+1 ms)Flushing 813a51a97e7f9ea6d5c277ef30637811/info: creating writer at 1733709081102Flushing 813a51a97e7f9ea6d5c277ef30637811/info: appending metadata at 1733709081112 (+10 ms)Flushing 813a51a97e7f9ea6d5c277ef30637811/info: closing flushed file at 1733709081112Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1365d076: reopening flushed file at 1733709081139 (+27 ms)Finished flush of dataSize ~1.05 KB/1075, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 813a51a97e7f9ea6d5c277ef30637811 in 51ms, sequenceid=70, compaction requested=true at 1733709081150 (+11 ms)Writing region close event to WAL at 1733709081172 (+22 ms)Running coprocessor post-close hooks at 1733709081185 (+13 ms)Closed at 1733709081185 2024-12-09T01:51:21,186 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811. 2024-12-09T01:51:21,212 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/.tmp/info/ccbcac48e5144e4e96a8b40b2e3afd5b is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1733709044071.813a51a97e7f9ea6d5c277ef30637811./info:regioninfo/1733709044476/Put/seqid=0 2024-12-09T01:51:21,216 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,215 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37756 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741898_1084] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4]'}, localName='127.0.0.1:38775', datanodeUuid='63d05511-8676-42d8-83ea-542fafbdd738', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741898_1084 to mirror 127.0.0.1:36535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:21,216 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:21,216 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741898_1084 2024-12-09T01:51:21,216 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37756 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741898_1084] {}] datanode.BlockReceiver(316): Block 1073741898 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:21,216 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37756 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741898_1084] {}] datanode.DataXceiver(331): 127.0.0.1:38775:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37756 dst: /127.0.0.1:38775 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:21,217 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:21,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741899_1085 (size=7089) 2024-12-09T01:51:21,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741899_1085 (size=7089) 2024-12-09T01:51:21,224 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/.tmp/info/ccbcac48e5144e4e96a8b40b2e3afd5b 2024-12-09T01:51:21,250 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/.tmp/ns/4ca7e19d30c04645ad048a7d8877310e is 43, key is default/ns:d/1733709043902/Put/seqid=0 2024-12-09T01:51:21,253 WARN [Thread-1067 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:33411 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37784 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741900_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4]'}, localName='127.0.0.1:38775', datanodeUuid='63d05511-8676-42d8-83ea-542fafbdd738', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741900_1086 to mirror 127.0.0.1:33411 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:21,253 WARN [Thread-1067 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK]) is bad. 2024-12-09T01:51:21,253 WARN [Thread-1067 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741900_1086 2024-12-09T01:51:21,253 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37784 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741900_1086] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:21,253 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37784 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741900_1086] {}] datanode.DataXceiver(331): 127.0.0.1:38775:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37784 dst: /127.0.0.1:38775 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:21,254 WARN [Thread-1067 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:33411,DS-6cdd1297-2841-4193-aeb4-81deb67dacc0,DISK] 2024-12-09T01:51:21,255 WARN [Thread-1067 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,255 WARN [Thread-1067 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:35213,DS-cb689a3a-d25d-4e77-a460-d129bf73e88f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 2024-12-09T01:51:21,255 WARN [Thread-1067 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741901_1087 2024-12-09T01:51:21,256 WARN [Thread-1067 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:21,258 WARN [Thread-1067 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1088 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,258 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37792 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741902_1088] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4]'}, localName='127.0.0.1:38775', datanodeUuid='63d05511-8676-42d8-83ea-542fafbdd738', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741902_1088 to mirror 127.0.0.1:36535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:21,259 WARN [Thread-1067 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741902_1088 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:21,259 WARN [Thread-1067 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741902_1088 2024-12-09T01:51:21,259 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37792 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741902_1088] {}] datanode.BlockReceiver(316): Block 1073741902 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:21,259 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37792 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741902_1088] {}] datanode.DataXceiver(331): 127.0.0.1:38775:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37792 dst: /127.0.0.1:38775 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:21,259 WARN [Thread-1067 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:21,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741903_1089 (size=5153) 2024-12-09T01:51:21,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741903_1089 (size=5153) 2024-12-09T01:51:21,266 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/.tmp/ns/4ca7e19d30c04645ad048a7d8877310e 2024-12-09T01:51:21,289 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/.tmp/table/e2d25c7b88fc445984f10141f8f61c15 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1733709044491/Put/seqid=0 2024-12-09T01:51:21,290 WARN [Thread-1076 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,291 WARN [Thread-1076 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK]) is bad. 
2024-12-09T01:51:21,291 WARN [Thread-1076 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741904_1090 2024-12-09T01:51:21,291 WARN [Thread-1076 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34383,DS-a89c88c2-2971-4181-bab7-a51a0eddf136,DISK] 2024-12-09T01:51:21,293 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:21,293 WARN [Thread-1076 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36535 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:21,293 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37822 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741905_1091] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4]'}, localName='127.0.0.1:38775', datanodeUuid='63d05511-8676-42d8-83ea-542fafbdd738', xmitsInProgress=0}:Exception transferring block BP-1767086187-172.17.0.2-1733709042101:blk_1073741905_1091 to mirror 127.0.0.1:36535 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:21,293 WARN [Thread-1076 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1767086187-172.17.0.2-1733709042101:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38775,DS-f38d2b02-337e-4a8e-aaa1-451c51f713e1,DISK], DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK]) is bad. 2024-12-09T01:51:21,293 WARN [Thread-1076 {}] hdfs.DataStreamer(1850): Abandoning BP-1767086187-172.17.0.2-1733709042101:blk_1073741905_1091 2024-12-09T01:51:21,293 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37822 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741905_1091] {}] datanode.BlockReceiver(316): Block 1073741905 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-12-09T01:51:21,294 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-267260528_22 at /127.0.0.1:37822 [Receiving block BP-1767086187-172.17.0.2-1733709042101:blk_1073741905_1091] {}] datanode.DataXceiver(331): 127.0.0.1:38775:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37822 dst: /127.0.0.1:38775 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:21,294 WARN [Thread-1076 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36535,DS-7053096a-bf37-4a5d-a16a-acd827b900db,DISK] 2024-12-09T01:51:21,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741906_1092 (size=5424) 2024-12-09T01:51:21,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741906_1092 (size=5424) 2024-12-09T01:51:21,300 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/.tmp/table/e2d25c7b88fc445984f10141f8f61c15 2024-12-09T01:51:21,304 DEBUG [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T01:51:21,308 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/.tmp/info/ccbcac48e5144e4e96a8b40b2e3afd5b as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/info/ccbcac48e5144e4e96a8b40b2e3afd5b 2024-12-09T01:51:21,313 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/info/ccbcac48e5144e4e96a8b40b2e3afd5b, entries=10, sequenceid=11, filesize=6.9 K 2024-12-09T01:51:21,314 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/.tmp/ns/4ca7e19d30c04645ad048a7d8877310e as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/ns/4ca7e19d30c04645ad048a7d8877310e 2024-12-09T01:51:21,320 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/ns/4ca7e19d30c04645ad048a7d8877310e, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T01:51:21,321 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/.tmp/table/e2d25c7b88fc445984f10141f8f61c15 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/table/e2d25c7b88fc445984f10141f8f61c15 2024-12-09T01:51:21,327 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/table/e2d25c7b88fc445984f10141f8f61c15, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T01:51:21,328 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished 
flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 224ms, sequenceid=11, compaction requested=false 2024-12-09T01:51:21,333 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T01:51:21,334 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:51:21,334 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:21,334 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709081104Running coprocessor pre-close hooks at 1733709081104Disabling compacts and flushes for region at 1733709081104Disabling writes for close at 1733709081104Obtaining lock to block concurrent updates at 1733709081105 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733709081105Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1733709081107 (+2 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733709081185 (+78 ms)Flushing 1588230740/info: creating writer at 1733709081185Flushing 1588230740/info: appending metadata at 1733709081211 (+26 ms)Flushing 1588230740/info: closing flushed file at 1733709081212 (+1 ms)Flushing 1588230740/ns: creating writer at 1733709081232 (+20 ms)Flushing 1588230740/ns: appending metadata at 1733709081249 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733709081249Flushing 1588230740/table: creating writer at 1733709081273 (+24 ms)Flushing 1588230740/table: appending metadata at 1733709081288 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733709081288Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45d45fa2: reopening flushed file at 1733709081307 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3ed232df: reopening flushed file at 1733709081314 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a823fb9: reopening flushed file at 1733709081320 (+6 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 224ms, sequenceid=11, compaction requested=false at 1733709081328 (+8 ms)Writing region close event to WAL at 1733709081329 (+1 ms)Running coprocessor post-close hooks at 1733709081334 (+5 ms)Closed at 1733709081334 2024-12-09T01:51:21,334 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:21,363 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T01:51:21,363 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T01:51:21,504 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,45353,1733709043006; all regions closed. 
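Note: the "Committing .../.tmp/info/ccbcac48e5144e4e96a8b40b2e3afd5b as .../info/..." lines above show the flush commit step: the flusher writes the new HFile under the region's .tmp directory and it only becomes visible in the store directory once it is renamed into place. A minimal sketch of that write-then-rename pattern using only the generic FileSystem API follows; the helper name and paths are illustrative, not HBase's internal method.

```java
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class TmpCommit {
  private TmpCommit() {}

  /**
   * Write-then-rename commit: a file fully written under .tmp is moved into the
   * store directory in one rename, so readers never observe a partial HFile.
   */
  public static Path commit(FileSystem fs, Path tmpFile, Path storeDir) throws IOException {
    Path dest = new Path(storeDir, tmpFile.getName());
    if (!fs.rename(tmpFile, dest)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + dest);
    }
    return dest;
  }
}
```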
2024-12-09T01:51:21,505 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,505 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,505 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,505 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,505 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:21,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741897_1082 (size=825) 2024-12-09T01:51:21,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741897_1082 (size=825) 2024-12-09T01:51:21,559 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@5fe9233[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35213, datanodeUuid=6324855b-2229-48c2-8de4-9d5441165530, infoPort=42515, infoSecurePort=0, ipcPort=37231, storageInfo=lv=-57;cid=testClusterID;nsid=1926921130;c=1733709042101):Failed to transfer BP-1767086187-172.17.0.2-1733709042101:blk_1073741861_1044 to 127.0.0.1:36535 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
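Note: the DataNode$DataTransfer warning above is background re-replication traffic, where the surviving datanode tries to push a replica to 127.0.0.1:36535, which is already down. If you want to see where the replicas of a recovered file actually landed once re-replication settles, a small hypothetical probe using the public FileSystem API could look like the sketch below (cluster URI taken from the log, file path supplied as an argument).

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicaLocations {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42375"), new Configuration());
    Path file = new Path(args[0]);              // e.g. a WAL that was moved to .../oldWALs
    FileStatus st = fs.getFileStatus(file);
    for (BlockLocation loc : fs.getFileBlockLocations(st, 0, st.getLen())) {
      // Each block should list only live datanodes once re-replication has caught up.
      System.out.println(loc.getOffset() + " -> " + String.join(",", loc.getHosts()));
    }
  }
}
```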
2024-12-09T01:51:22,038 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:22,052 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T01:51:22,052 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T01:51:23,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-12-09T01:51:23,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:51:23,061 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T01:51:23,284 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@52c2fb34 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1767086187-172.17.0.2-1733709042101:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:34383,null,null]) java.net.ConnectException: Call From ef6f18c58dc9/172.17.0.2 to localhost:44189 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T01:51:23,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741830_1064 (size=27294) 2024-12-09T01:51:23,928 INFO [master/ef6f18c58dc9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T01:51:23,928 INFO [master/ef6f18c58dc9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T01:51:25,113 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 after 4002ms 2024-12-09T01:51:25,175 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta after 4002ms 2024-12-09T01:51:25,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741836_1012 (size=76) 2024-12-09T01:51:25,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:51:26,111 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T01:51:26,114 DEBUG [RS:1;ef6f18c58dc9:36919 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/oldWALs 2024-12-09T01:51:26,114 INFO [RS:1;ef6f18c58dc9:36919 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C36919%2C1733709043959:(num 1733709044172) 2024-12-09T01:51:26,114 DEBUG [RS:1;ef6f18c58dc9:36919 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:26,114 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.LeaseManager(133): 
Closed leases 2024-12-09T01:51:26,114 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:26,114 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:26,114 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:51:26,114 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:51:26,114 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:51:26,114 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T01:51:26,114 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:26,114 INFO [RS:1;ef6f18c58dc9:36919 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36919 2024-12-09T01:51:26,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x10074784dd30002, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,36919,1733709043959 2024-12-09T01:51:26,117 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:26,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:26,118 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,36919,1733709043959] 2024-12-09T01:51:26,119 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:26,121 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,36919,1733709043959 already deleted, retry=false 2024-12-09T01:51:26,121 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,36919,1733709043959 expired; onlineServers=1 2024-12-09T01:51:26,187 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,209 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,210 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,211 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x10074784dd30002, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36919-0x10074784dd30002, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:26,220 INFO [RS:1;ef6f18c58dc9:36919 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:26,220 INFO [RS:1;ef6f18c58dc9:36919 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,36919,1733709043959; zookeeper connection closed. 2024-12-09T01:51:26,220 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@10bb86a8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@10bb86a8 2024-12-09T01:51:26,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,223 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:51:26,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:51:26,506 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T01:51:26,509 DEBUG [RS:0;ef6f18c58dc9:45353 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/oldWALs 2024-12-09T01:51:26,510 INFO [RS:0;ef6f18c58dc9:45353 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C45353%2C1733709043006.meta:.meta(num 1733709081106) 2024-12-09T01:51:26,510 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:26,510 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:26,510 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:26,510 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:26,511 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:26,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741883_1067 (size=18156) 2024-12-09T01:51:26,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741883_1067 (size=18156) 2024-12-09T01:51:26,725 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:51:26,739 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,740 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,741 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,745 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:26,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:27,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:27,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:27,185 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 2024-12-09T01:51:28,121 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:28,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:28,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741835_1011 (size=393) 2024-12-09T01:51:28,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:51:29,122 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:29,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:29,185 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 
2024-12-09T01:51:29,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:51:29,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:51:30,123 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:30,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:31,124 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:31,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor104.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:31,185 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.FSHLog(580): java.nio.channels.ClosedChannelException: null at org.apache.hadoop.hdfs.ExceptionLastSeen.throwException4Close(ExceptionLastSeen.java:73) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:158) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSOutputStream.getCurrentBlockReplication(DFSOutputStream.java:775) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.HdfsDataOutputStream.getCurrentBlockReplication(HdfsDataOutputStream.java:79) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.getLogReplication(FSHLog.java:577) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.FSHLog.doCheckLogLowReplication(FSHLog.java:525) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.checkLogLowReplication(AbstractFSWAL.java:2224) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.checkLowReplication(AbstractWALRoller.java:148) ~[classes/:?] at org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:176) ~[classes/:?] 
2024-12-09T01:51:31,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:51:31,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741830_1064 (size=27294) 2024-12-09T01:51:31,511 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete. Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-12-09T01:51:31,515 DEBUG [RS:0;ef6f18c58dc9:45353 {}] wal.AbstractFSWAL(1256): Moved 3 WAL file(s) to /user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/oldWALs 2024-12-09T01:51:31,515 INFO [RS:0;ef6f18c58dc9:45353 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C45353%2C1733709043006:(num 1733709080401) 2024-12-09T01:51:31,515 DEBUG [RS:0;ef6f18c58dc9:45353 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:31,515 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:31,516 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:31,516 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:31,516 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:31,516 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-09T01:51:31,516 INFO [RS:0;ef6f18c58dc9:45353 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45353 2024-12-09T01:51:31,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,45353,1733709043006 2024-12-09T01:51:31,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:31,519 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:31,520 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,45353,1733709043006] 2024-12-09T01:51:31,522 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,45353,1733709043006 already deleted, retry=false 2024-12-09T01:51:31,522 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,45353,1733709043006 expired; onlineServers=0 2024-12-09T01:51:31,522 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef6f18c58dc9,46389,1733709042944' ***** 2024-12-09T01:51:31,522 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T01:51:31,522 INFO [M:0;ef6f18c58dc9:46389 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:31,522 INFO [M:0;ef6f18c58dc9:46389 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:51:31,522 DEBUG [M:0;ef6f18c58dc9:46389 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T01:51:31,522 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T01:51:31,522 DEBUG [M:0;ef6f18c58dc9:46389 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T01:51:31,522 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709043215 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709043215,5,FailOnTimeoutGroup] 2024-12-09T01:51:31,522 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709043216 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709043216,5,FailOnTimeoutGroup] 2024-12-09T01:51:31,523 INFO [M:0;ef6f18c58dc9:46389 {}] hbase.ChoreService(370): Chore service for: master/ef6f18c58dc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T01:51:31,523 INFO [M:0;ef6f18c58dc9:46389 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:51:31,523 DEBUG [M:0;ef6f18c58dc9:46389 {}] master.HMaster(1795): Stopping service threads 2024-12-09T01:51:31,523 INFO [M:0;ef6f18c58dc9:46389 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T01:51:31,523 INFO [M:0;ef6f18c58dc9:46389 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:51:31,523 INFO [M:0;ef6f18c58dc9:46389 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T01:51:31,523 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T01:51:31,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:31,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:31,525 DEBUG [M:0;ef6f18c58dc9:46389 {}] zookeeper.ZKUtil(347): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T01:51:31,525 WARN [M:0;ef6f18c58dc9:46389 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T01:51:31,525 INFO [M:0;ef6f18c58dc9:46389 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/.lastflushedseqids 2024-12-09T01:51:31,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741907_1093 (size=130) 2024-12-09T01:51:31,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741907_1093 (size=130) 2024-12-09T01:51:31,532 INFO [M:0;ef6f18c58dc9:46389 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T01:51:31,532 INFO [M:0;ef6f18c58dc9:46389 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T01:51:31,532 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:51:31,532 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:31,532 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:31,532 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:51:31,532 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:31,532 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.24 KB heapSize=29.47 KB 2024-12-09T01:51:31,549 DEBUG [M:0;ef6f18c58dc9:46389 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1830b5180d1547938dcd4e9b7af27e47 is 82, key is hbase:meta,,1/info:regioninfo/1733709043882/Put/seqid=0 2024-12-09T01:51:31,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741908_1094 (size=5672) 2024-12-09T01:51:31,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741908_1094 (size=5672) 2024-12-09T01:51:31,555 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1830b5180d1547938dcd4e9b7af27e47 2024-12-09T01:51:31,578 DEBUG [M:0;ef6f18c58dc9:46389 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4d53636330c4c849ffb605f0c3996d3 is 773, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733709044496/Put/seqid=0 2024-12-09T01:51:31,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741909_1095 (size=6254) 2024-12-09T01:51:31,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741909_1095 (size=6254) 2024-12-09T01:51:31,587 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4d53636330c4c849ffb605f0c3996d3 2024-12-09T01:51:31,594 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e4d53636330c4c849ffb605f0c3996d3 2024-12-09T01:51:31,609 DEBUG [M:0;ef6f18c58dc9:46389 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/111c1de35df94f48ab118012dcf28afc is 69, key is ef6f18c58dc9,36919,1733709043959/rs:state/1733709044014/Put/seqid=0 2024-12-09T01:51:31,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741910_1096 (size=5224) 2024-12-09T01:51:31,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741910_1096 (size=5224) 2024-12-09T01:51:31,619 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/111c1de35df94f48ab118012dcf28afc 2024-12-09T01:51:31,621 INFO [RS:0;ef6f18c58dc9:45353 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:31,621 INFO [RS:0;ef6f18c58dc9:45353 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,45353,1733709043006; zookeeper connection closed. 2024-12-09T01:51:31,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:31,621 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45353-0x10074784dd30001, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:31,624 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@34e3029c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@34e3029c 2024-12-09T01:51:31,624 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-12-09T01:51:31,653 DEBUG [M:0;ef6f18c58dc9:46389 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d42f17d2f45b4020898ba37a10ec6863 is 52, key is load_balancer_on/state:d/1733709043943/Put/seqid=0 2024-12-09T01:51:31,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741911_1097 (size=5056) 2024-12-09T01:51:31,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741911_1097 (size=5056) 2024-12-09T01:51:31,667 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d42f17d2f45b4020898ba37a10ec6863 2024-12-09T01:51:31,675 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1830b5180d1547938dcd4e9b7af27e47 as 
hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1830b5180d1547938dcd4e9b7af27e47 2024-12-09T01:51:31,682 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1830b5180d1547938dcd4e9b7af27e47, entries=8, sequenceid=60, filesize=5.5 K 2024-12-09T01:51:31,684 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/e4d53636330c4c849ffb605f0c3996d3 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e4d53636330c4c849ffb605f0c3996d3 2024-12-09T01:51:31,690 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for e4d53636330c4c849ffb605f0c3996d3 2024-12-09T01:51:31,690 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/e4d53636330c4c849ffb605f0c3996d3, entries=6, sequenceid=60, filesize=6.1 K 2024-12-09T01:51:31,691 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/111c1de35df94f48ab118012dcf28afc as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/111c1de35df94f48ab118012dcf28afc 2024-12-09T01:51:31,699 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/111c1de35df94f48ab118012dcf28afc, entries=2, sequenceid=60, filesize=5.1 K 2024-12-09T01:51:31,701 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d42f17d2f45b4020898ba37a10ec6863 as hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d42f17d2f45b4020898ba37a10ec6863 2024-12-09T01:51:31,708 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d42f17d2f45b4020898ba37a10ec6863, entries=1, sequenceid=60, filesize=4.9 K 2024-12-09T01:51:31,709 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 177ms, sequenceid=60, compaction requested=false 2024-12-09T01:51:31,716 INFO [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T01:51:31,716 DEBUG [M:0;ef6f18c58dc9:46389 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709091532Disabling compacts and flushes for region at 1733709091532Disabling writes for close at 1733709091532Obtaining lock to block concurrent updates at 1733709091532Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733709091532Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23793, getHeapSize=30112, getOffHeapSize=0, getCellsCount=71 at 1733709091533 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733709091534 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733709091534Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733709091548 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733709091548Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733709091561 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733709091578 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733709091578Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733709091594 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733709091609 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733709091609Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733709091631 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733709091653 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733709091653Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@62338d7d: reopening flushed file at 1733709091674 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d55378b: reopening flushed file at 1733709091683 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a9c2c0a: reopening flushed file at 1733709091691 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@22a02a4a: reopening flushed file at 1733709091700 (+9 ms)Finished flush of dataSize ~23.24 KB/23793, heapSize ~29.41 KB/30112, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 177ms, sequenceid=60, compaction requested=false at 1733709091709 (+9 ms)Writing region close event to WAL at 1733709091716 (+7 ms)Closed at 1733709091716 2024-12-09T01:51:31,717 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:31,717 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:31,717 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:31,717 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:31,717 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:31,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35213 is added to blk_1073741880_1063 (size=1045) 2024-12-09T01:51:31,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38775 is added to blk_1073741880_1063 (size=1045) 2024-12-09T01:51:31,720 INFO [M:0;ef6f18c58dc9:46389 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T01:51:31,720 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T01:51:31,720 INFO [M:0;ef6f18c58dc9:46389 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46389 2024-12-09T01:51:31,721 INFO [M:0;ef6f18c58dc9:46389 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:51:31,823 INFO [M:0;ef6f18c58dc9:46389 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:51:31,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:31,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46389-0x10074784dd30000, quorum=127.0.0.1:59768, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:51:31,826 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3aa56449{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:31,826 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23df32cf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:31,826 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:31,827 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66e63d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:31,827 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2234d719{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:31,829 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@f41f368 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1767086187-172.17.0.2-1733709042101:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:34383,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:44189 , LocalHost:localPort ef6f18c58dc9/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-12-09T01:51:31,830 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:51:31,830 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:31,830 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:31,830 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1767086187-172.17.0.2-1733709042101 (Datanode Uuid 63d05511-8676-42d8-83ea-542fafbdd738) service to localhost/127.0.0.1:42375 2024-12-09T01:51:31,830 ERROR [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@f41f368 {}] datanode.DataNode(1743): Cannot find BPOfferService for reporting block received for bpid=BP-1767086187-172.17.0.2-1733709042101 2024-12-09T01:51:31,831 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data3/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:31,831 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data4/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:31,832 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:31,832 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@f41f368 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:38775,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1767086187-172.17.0.2-1733709042101 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:31,832 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@f41f368 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:34383,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1767086187-172.17.0.2-1733709042101 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:31,832 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@f41f368 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:38775,null,null], DatanodeInfoWithStorage[127.0.0.1:34383,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1767086187-172.17.0.2-1733709042101:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:38775,null,null], DatanodeInfoWithStorage[127.0.0.1:34383,null,null]] 2024-12-09T01:51:31,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@58872ac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:31,835 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3afd309b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:31,835 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:31,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@83c4e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:31,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2281f2b5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:31,838 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:31,838 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:51:31,838 WARN [BP-1767086187-172.17.0.2-1733709042101 heartbeating to localhost/127.0.0.1:42375 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1767086187-172.17.0.2-1733709042101 (Datanode Uuid 6324855b-2229-48c2-8de4-9d5441165530) service to localhost/127.0.0.1:42375 2024-12-09T01:51:31,838 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:31,838 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data7/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:31,839 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/cluster_88dd5013-1d6c-a902-27af-d49fd6c2690b/data/data8/current/BP-1767086187-172.17.0.2-1733709042101 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:31,839 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:31,846 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77cd23f0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:51:31,847 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3ed3a961{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:31,847 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:31,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@142d24a0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:31,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3e7d5546{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:31,861 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T01:51:31,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T01:51:31,914 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=156 (was 77) Potentially hanging thread: Command processor java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007eff40bef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42375 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42375 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:42375 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42375 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42375 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:42375 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.1@localhost:38277 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-16-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42375 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-17-3 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42375 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42375 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42375 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007eff40bef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38277 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:42375 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) 
app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007eff40bef390.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=438 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=246 (was 267), ProcessCount=11 (was 11), AvailableMemoryMB=12447 (was 12224) - AvailableMemoryMB LEAK? 
- 2024-12-09T01:51:31,926 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=156, OpenFileDescriptor=438, MaxFileDescriptor=1048576, SystemLoadAverage=246, ProcessCount=11, AvailableMemoryMB=12446 2024-12-09T01:51:31,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T01:51:31,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.log.dir so I do NOT create it in target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3 2024-12-09T01:51:31,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/179d050b-72ca-117c-8548-4870eb2d2ea9/hadoop.tmp.dir so I do NOT create it in target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3 2024-12-09T01:51:31,927 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc, deleteOnExit=true 2024-12-09T01:51:31,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T01:51:31,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/test.cache.data in system properties and HBase conf 2024-12-09T01:51:31,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T01:51:31,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir in system properties and HBase conf 2024-12-09T01:51:31,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T01:51:31,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T01:51:31,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T01:51:31,928 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T01:51:31,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:51:31,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:51:31,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T01:51:31,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:51:31,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T01:51:31,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T01:51:31,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:51:31,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:51:31,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T01:51:31,929 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/nfs.dump.dir in system properties and HBase conf 2024-12-09T01:51:31,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/java.io.tmpdir in system properties and HBase conf 2024-12-09T01:51:31,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:51:31,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T01:51:31,930 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T01:51:31,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:31,950 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:51:32,038 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:32,046 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:32,068 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:32,068 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:32,069 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:51:32,069 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:32,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c727387{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:32,070 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10e56c5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:32,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:32,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:32,206 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f6d993c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/java.io.tmpdir/jetty-localhost-34989-hadoop-hdfs-3_4_1-tests_jar-_-any-1030649502051736629/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:51:32,206 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ea918cd{HTTP/1.1, (http/1.1)}{localhost:34989} 2024-12-09T01:51:32,206 INFO [Time-limited test {}] server.Server(415): Started @154944ms 2024-12-09T01:51:32,224 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:51:32,317 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:32,320 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:32,324 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:32,324 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:32,324 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:51:32,325 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@29ec962a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:32,326 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@444db7a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:32,462 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@354edf1e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/java.io.tmpdir/jetty-localhost-45057-hadoop-hdfs-3_4_1-tests_jar-_-any-1061934179195775938/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:32,463 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2aa0e69d{HTTP/1.1, (http/1.1)}{localhost:45057} 2024-12-09T01:51:32,463 INFO [Time-limited test {}] server.Server(415): Started @155201ms 2024-12-09T01:51:32,465 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
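The repeated "Failed invocation" WARNs above all have the same shape: while closing a WAL writer, RecoverLeaseFSUtils probes DistributedFileSystem#isFileClosed through reflection, and because the test's DFSClient has already been shut down, the probe fails with java.io.IOException: Filesystem closed wrapped in an InvocationTargetException. Below is a minimal sketch of such a reflective probe; the class and its handling are simplified assumptions for illustration, not the HBase source, but they show why the root cause surfaces wrapped the way the stack traces show.

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper, for illustration only.
public final class IsFileClosedProbe {

  /** Asks HDFS whether a file is closed, returning false when the probe cannot be made. */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) is only present on DistributedFileSystem, hence the reflective lookup.
      Method isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosed.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false; // not an HDFS filesystem, or method not accessible
    } catch (InvocationTargetException e) {
      // If the underlying DFSClient was already closed, the cause is
      // IOException("Filesystem closed"), exactly what the WARNs above report.
      return false;
    }
  }
}
```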
2024-12-09T01:51:32,552 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:32,561 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:32,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:32,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:32,569 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:51:32,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7c7a3196{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:32,573 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23fcbb95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:32,602 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data1/current/BP-2069266944-172.17.0.2-1733709091959/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:32,603 WARN [Thread-1206 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data2/current/BP-2069266944-172.17.0.2-1733709091959/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:32,641 WARN [Thread-1184 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:51:32,650 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x43c11c46ddb50d6f with lease ID 0xb83ed98518f626f9: Processing first storage report for DS-cedbe907-b73d-43f0-88eb-987ea205b350 from datanode DatanodeRegistration(127.0.0.1:43857, datanodeUuid=c141504a-e754-4899-983b-758e64bcb62c, infoPort=46509, infoSecurePort=0, ipcPort=39869, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959) 2024-12-09T01:51:32,650 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x43c11c46ddb50d6f with lease ID 0xb83ed98518f626f9: from storage DS-cedbe907-b73d-43f0-88eb-987ea205b350 node DatanodeRegistration(127.0.0.1:43857, datanodeUuid=c141504a-e754-4899-983b-758e64bcb62c, infoPort=46509, infoSecurePort=0, ipcPort=39869, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T01:51:32,651 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x43c11c46ddb50d6f with lease ID 0xb83ed98518f626f9: Processing first storage report for DS-3c113729-ddd4-42ba-9884-35ce8b3e3ffa from datanode DatanodeRegistration(127.0.0.1:43857, datanodeUuid=c141504a-e754-4899-983b-758e64bcb62c, infoPort=46509, infoSecurePort=0, ipcPort=39869, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959) 2024-12-09T01:51:32,651 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x43c11c46ddb50d6f with lease ID 0xb83ed98518f626f9: from storage DS-3c113729-ddd4-42ba-9884-35ce8b3e3ffa node DatanodeRegistration(127.0.0.1:43857, datanodeUuid=c141504a-e754-4899-983b-758e64bcb62c, infoPort=46509, infoSecurePort=0, ipcPort=39869, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:32,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49d720be{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/java.io.tmpdir/jetty-localhost-36765-hadoop-hdfs-3_4_1-tests_jar-_-any-13926371623790622910/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:32,714 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@37fa3e5a{HTTP/1.1, (http/1.1)}{localhost:36765} 2024-12-09T01:51:32,715 INFO [Time-limited test {}] server.Server(415): Started @155452ms 2024-12-09T01:51:32,717 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
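The DirectoryScanner WARN above notes that dfs.datanode.directoryscan.throttle.limit.ms.per.sec was set above 1000 ms/sec and therefore falls back to -1, i.e. no throttling. A tiny sketch of that clamping behaviour is shown below; it is only an illustration of what the warning describes, not the datanode's actual code.

```java
// Illustrative only: mirrors the fallback described in the DirectoryScanner warning above.
public final class DirectoryScanThrottle {

  /**
   * A throttle of more than 1000 ms of scan work per second is meaningless,
   * so such values are treated as -1 ("do not throttle"), as the warning states.
   */
  static int effectiveThrottleMsPerSec(int requestedMsPerSec) {
    return (requestedMsPerSec > 1000) ? -1 : requestedMsPerSec;
  }
}
```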
2024-12-09T01:51:32,858 WARN [Thread-1231 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data3/current/BP-2069266944-172.17.0.2-1733709091959/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:32,859 WARN [Thread-1232 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data4/current/BP-2069266944-172.17.0.2-1733709091959/current, will proceed with Du for space computation calculation, 2024-12-09T01:51:32,900 WARN [Thread-1220 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T01:51:32,903 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x448ffff9d3d0a9a7 with lease ID 0xb83ed98518f626fa: Processing first storage report for DS-298ad5c7-a290-48e8-8f58-473fad29767e from datanode DatanodeRegistration(127.0.0.1:41489, datanodeUuid=0e7f2e3c-18d3-4a81-975d-83e740a7cd7b, infoPort=43403, infoSecurePort=0, ipcPort=37345, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959) 2024-12-09T01:51:32,903 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x448ffff9d3d0a9a7 with lease ID 0xb83ed98518f626fa: from storage DS-298ad5c7-a290-48e8-8f58-473fad29767e node DatanodeRegistration(127.0.0.1:41489, datanodeUuid=0e7f2e3c-18d3-4a81-975d-83e740a7cd7b, infoPort=43403, infoSecurePort=0, ipcPort=37345, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:32,903 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x448ffff9d3d0a9a7 with lease ID 0xb83ed98518f626fa: Processing first storage report for DS-89fa7659-64fa-487f-a16f-fb331d050fc3 from datanode DatanodeRegistration(127.0.0.1:41489, datanodeUuid=0e7f2e3c-18d3-4a81-975d-83e740a7cd7b, infoPort=43403, infoSecurePort=0, ipcPort=37345, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959) 2024-12-09T01:51:32,903 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x448ffff9d3d0a9a7 with lease ID 0xb83ed98518f626fa: from storage DS-89fa7659-64fa-487f-a16f-fb331d050fc3 node DatanodeRegistration(127.0.0.1:41489, datanodeUuid=0e7f2e3c-18d3-4a81-975d-83e740a7cd7b, infoPort=43403, infoSecurePort=0, ipcPort=37345, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:32,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:32,962 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3 2024-12-09T01:51:32,968 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/zookeeper_0, clientPort=63721, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T01:51:32,969 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63721 2024-12-09T01:51:32,970 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:32,971 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:32,984 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:51:32,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41489 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:51:32,986 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c with version=8 2024-12-09T01:51:32,986 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/hbase-staging 2024-12-09T01:51:32,989 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:32,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:32,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:32,989 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:32,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:32,989 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:32,989 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T01:51:32,990 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:32,991 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37609 2024-12-09T01:51:32,992 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37609 connecting to ZooKeeper ensemble=127.0.0.1:63721 2024-12-09T01:51:32,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:376090x0, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:33,000 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37609-0x1007479114a0000 connected 2024-12-09T01:51:33,033 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:33,036 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:33,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37609-0x1007479114a0000, 
quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:33,046 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c, hbase.cluster.distributed=false 2024-12-09T01:51:33,048 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:33,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37609 2024-12-09T01:51:33,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37609 2024-12-09T01:51:33,050 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37609 2024-12-09T01:51:33,050 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37609 2024-12-09T01:51:33,053 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37609 2024-12-09T01:51:33,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T01:51:33,073 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:51:33,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:33,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:33,073 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:51:33,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:51:33,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:51:33,074 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:51:33,074 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:51:33,076 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38509 2024-12-09T01:51:33,078 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38509 connecting to ZooKeeper ensemble=127.0.0.1:63721 2024-12-09T01:51:33,079 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:33,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:33,087 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385090x0, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:51:33,087 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:33,087 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38509-0x1007479114a0001 connected 2024-12-09T01:51:33,087 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:51:33,088 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:51:33,089 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:51:33,090 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:51:33,091 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38509 2024-12-09T01:51:33,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38509 2024-12-09T01:51:33,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38509 2024-12-09T01:51:33,093 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38509 2024-12-09T01:51:33,093 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38509 2024-12-09T01:51:33,107 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef6f18c58dc9:37609 2024-12-09T01:51:33,108 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:33,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:33,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:33,112 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on existing 
znode=/hbase/backup-masters/ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:33,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:51:33,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,113 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,114 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:51:33,114 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef6f18c58dc9,37609,1733709092988 from backup master directory 2024-12-09T01:51:33,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:33,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:33,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:51:33,116 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:51:33,116 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:33,125 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:33,128 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/hbase.id] with ID: 8da61aba-ed05-4c68-b174-441e350d322a 2024-12-09T01:51:33,128 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/.tmp/hbase.id 2024-12-09T01:51:33,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41489 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:51:33,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:51:33,140 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/.tmp/hbase.id]:[hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/hbase.id] 2024-12-09T01:51:33,153 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:33,153 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T01:51:33,155 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
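The FSUtils DEBUG lines above show the cluster ID being written to .tmp/hbase.id first and then moved to hbase.id under the test's root directory. Below is a minimal sketch of that write-to-temp-then-rename pattern using only the public Hadoop FileSystem API; the file names mirror the log, but the helper itself is an illustration rather than the FSUtils implementation.

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative helper: publish a small file by writing to .tmp and renaming it into place.
public final class ClusterIdFile {

  static void writeClusterId(Configuration conf, Path rootDir, String clusterId) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path target = new Path(rootDir, "hbase.id");
    Path temp = new Path(new Path(rootDir, ".tmp"), "hbase.id");

    // Write the content to a temporary location first so readers never observe a partial file.
    try (FSDataOutputStream out = fs.create(temp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }

    // Move the temporary file to its target location; on HDFS the rename is a single metadata operation.
    if (!fs.rename(temp, target)) {
      throw new IOException("Unable to move " + temp + " to " + target);
    }
  }
}
```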
2024-12-09T01:51:33,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:51:33,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41489 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:51:33,164 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:51:33,165 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T01:51:33,166 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:51:33,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41489 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:51:33,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:51:33,175 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store 2024-12-09T01:51:33,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41489 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:51:33,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:51:33,185 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:33,185 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:51:33,185 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:33,185 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:33,185 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:51:33,185 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:51:33,185 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
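For readability, the master:store descriptor that MasterRegion prints above (families info, proc, rs and state) can be expressed with the public client-side builder API roughly as follows. This is only an illustrative reconstruction of the logged attributes, not the code that actually builds the master's local region descriptor.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative reconstruction of the descriptor attributes printed in the log above.
public final class MasterStoreDescriptor {

  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setValue("hbase.store.file-tracker.impl", "DEFAULT")
        // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc', 'rs' and 'state': single version, ROW bloom, default 64 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("rs"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("state"))
            .setMaxVersions(1).setBloomFilterType(BloomType.ROW).build())
        .build();
  }
}
```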
2024-12-09T01:51:33,186 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709093185Disabling compacts and flushes for region at 1733709093185Disabling writes for close at 1733709093185Writing region close event to WAL at 1733709093185Closed at 1733709093185 2024-12-09T01:51:33,186 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/.initializing 2024-12-09T01:51:33,186 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:33,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:33,189 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C37609%2C1733709092988, suffix=, logDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988, archiveDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/oldWALs, maxLogs=10 2024-12-09T01:51:33,190 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 2024-12-09T01:51:33,195 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 2024-12-09T01:51:33,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46509:46509),(127.0.0.1/127.0.0.1:43403:43403)] 2024-12-09T01:51:33,219 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:51:33,219 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:33,219 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,219 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,221 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,223 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T01:51:33,223 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:33,227 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): 
Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:33,227 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T01:51:33,233 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:33,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:33,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,235 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T01:51:33,235 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:33,236 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=NONE, compression=NONE 2024-12-09T01:51:33,236 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,238 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T01:51:33,238 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:33,243 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:33,243 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,244 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,248 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,250 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,250 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,251 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
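The FlushLargeStoresPolicy DEBUG line above spells out the fallback used when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table: the per-family lower bound becomes the region's memstore flush heap size divided by the number of families. With the 134217728-byte flush size injected earlier and the four families of master:store, that is 33554432 bytes (32 MB), the flushSizeLowerBound value that appears a few lines further down. A one-line sketch of the arithmetic, using the numbers taken from the log itself:

```java
// Worked example of the fallback described in the log (values taken from the log itself).
public final class FlushLowerBound {

  static long perFamilyLowerBound(long memstoreFlushSizeBytes, int familyCount) {
    return memstoreFlushSizeBytes / familyCount;
  }

  public static void main(String[] args) {
    // 134217728 / 4 families (info, proc, rs, state) = 33554432 bytes = 32 MB
    System.out.println(perFamilyLowerBound(134_217_728L, 4));
  }
}
```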
2024-12-09T01:51:33,252 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:51:33,255 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:51:33,256 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767038, jitterRate=-0.024661973118782043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:51:33,257 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733709093219Initializing all the Stores at 1733709093221 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709093221Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709093221Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709093221Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709093221Cleaning up temporary data from old regions at 1733709093250 (+29 ms)Region opened successfully at 1733709093257 (+7 ms) 2024-12-09T01:51:33,258 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T01:51:33,262 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bb86d10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:51:33,263 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
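The "Opened 1595e783..." line above also prints the split policy with desiredMaxFileSize=767038 and jitterRate=-0.024661973118782043. Assuming the jitter is applied as base + (long)(base * jitterRate) — an assumption, not a statement about the split policy's source — those two numbers back-solve to a configured base max file size of 786432 bytes (768 KB), a deliberately tiny value for a test run. The sketch below is only a consistency check on the logged numbers.

```java
// Back-of-the-envelope check on the logged split-policy numbers (assumed formula, not HBase source).
public final class SplitSizeJitterCheck {

  static long desiredMaxFileSize(long configuredBaseBytes, double jitterRate) {
    return configuredBaseBytes + (long) (configuredBaseBytes * jitterRate);
  }

  public static void main(String[] args) {
    // 786432 + (long)(786432 * -0.024661973118782043) = 786432 - 19394 = 767038, as logged.
    System.out.println(desiredMaxFileSize(786_432L, -0.024661973118782043));
  }
}
```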
2024-12-09T01:51:33,263 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T01:51:33,263 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T01:51:33,264 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T01:51:33,264 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T01:51:33,265 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T01:51:33,265 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T01:51:33,268 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T01:51:33,269 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T01:51:33,274 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T01:51:33,275 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T01:51:33,276 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T01:51:33,278 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T01:51:33,278 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T01:51:33,285 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T01:51:33,287 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T01:51:33,288 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T01:51:33,290 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T01:51:33,293 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T01:51:33,296 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T01:51:33,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:33,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:33,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,301 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef6f18c58dc9,37609,1733709092988, sessionid=0x1007479114a0000, setting cluster-up flag (Was=false) 2024-12-09T01:51:33,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,314 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T01:51:33,315 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:33,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,325 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T01:51:33,326 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:33,339 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T01:51:33,341 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:33,341 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T01:51:33,341 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T01:51:33,342 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef6f18c58dc9,37609,1733709092988 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T01:51:33,343 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:33,343 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:33,343 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:33,343 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:51:33,344 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef6f18c58dc9:0, corePoolSize=10, maxPoolSize=10 2024-12-09T01:51:33,344 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,344 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:33,344 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,352 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:33,352 INFO [PEWorker-1 {}] 
procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T01:51:33,354 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:33,354 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T01:51:33,369 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733709123369 2024-12-09T01:51:33,369 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T01:51:33,369 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T01:51:33,369 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T01:51:33,369 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T01:51:33,369 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T01:51:33,369 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T01:51:33,370 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
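Editor's note: the ChoreService entries above register named periodic chores such as "ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS". As a rough analogy only, not HBase's ScheduledChore/ChoreService implementation, the same pattern can be expressed with a plain ScheduledExecutorService:

    // Hedged analogy (plain JDK, not HBase code): a named periodic task in the spirit of the
    // LogsCleaner chore enabled above.
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreAnalogy {
        public static void main(String[] args) {
            ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
            Runnable logsCleaner = () -> System.out.println("scanning oldWALs/ for expired files");
            // fixed-delay scheduling: the next run starts period ms after the previous run finishes
            pool.scheduleWithFixedDelay(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
        }
    }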
2024-12-09T01:51:33,370 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T01:51:33,371 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T01:51:33,371 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T01:51:33,371 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T01:51:33,371 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T01:51:33,372 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709093371,5,FailOnTimeoutGroup] 2024-12-09T01:51:33,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:51:33,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41489 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:51:33,375 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T01:51:33,375 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709093372,5,FailOnTimeoutGroup] 2024-12-09T01:51:33,375 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
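Editor's note: the hfile_cleaner entries above initialize a chain of cleaner delegates (TimeToLiveMasterLocalStoreHFileCleaner, HFileLinkCleaner, SnapshotHFileCleaner, TimeToLiveHFileCleaner). A hedged sketch of the chain-of-delegates idea with invented names; it is not the CleanerChore implementation, only the shape of "a file may be deleted only if every delegate agrees":

    // Hedged sketch with invented types: deletion requires unanimous approval from all delegates,
    // mirroring the list of cleaners initialized above.
    import java.util.List;
    import java.util.function.Predicate;

    public class CleanerChainSketch {
        record FileStatus(String path, long ageMs, boolean referencedBySnapshot) {}

        public static void main(String[] args) {
            Predicate<FileStatus> ttlCleaner = f -> f.ageMs() > 300_000;            // old enough to delete
            Predicate<FileStatus> snapshotCleaner = f -> !f.referencedBySnapshot(); // not pinned by a snapshot
            List<Predicate<FileStatus>> chain = List.of(ttlCleaner, snapshotCleaner);

            FileStatus candidate = new FileStatus("archive/data/hbase/meta/info/hfile1", 600_000, false);
            System.out.println(chain.stream().allMatch(p -> p.test(candidate))); // true: every delegate agrees
        }
    }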
2024-12-09T01:51:33,375 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c 2024-12-09T01:51:33,375 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T01:51:33,375 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,375 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
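Editor's note: the FSTableDescriptors and HRegion(7572) entries above echo the full hbase:meta descriptor, family by family. For comparison, a hedged sketch of how a similar column family could be described with the public HBase client builder API; the builder and setter names are assumed from that API, not taken from this test:

    // Hedged sketch (assumed client builder API): an 'info'-like family with the attributes
    // echoed in the meta descriptor above, attached to a demo table.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static void main(String[] args) {
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .build();
            TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
                .setColumnFamily(info)
                .build();
            System.out.println(td);
        }
    }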
2024-12-09T01:51:33,397 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(746): ClusterId : 8da61aba-ed05-4c68-b174-441e350d322a 2024-12-09T01:51:33,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41489 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:51:33,397 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:51:33,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:51:33,399 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:33,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:51:33,402 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:51:33,402 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:51:33,403 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:51:33,403 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:33,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:33,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:51:33,405 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:51:33,406 DEBUG [RS:0;ef6f18c58dc9:38509 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4814e226, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 
2024-12-09T01:51:33,407 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:51:33,407 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:33,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:33,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:51:33,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:51:33,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:33,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:33,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:51:33,411 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:51:33,411 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:33,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:33,412 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:51:33,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740 2024-12-09T01:51:33,413 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740 2024-12-09T01:51:33,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:51:33,415 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:51:33,420 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T01:51:33,423 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:51:33,427 DEBUG [RS:0;ef6f18c58dc9:38509 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef6f18c58dc9:38509 2024-12-09T01:51:33,427 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:51:33,427 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:51:33,427 DEBUG [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T01:51:33,428 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,37609,1733709092988 with port=38509, startcode=1733709093073 2024-12-09T01:51:33,428 DEBUG [RS:0;ef6f18c58dc9:38509 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:51:33,430 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:51:33,431 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=723077, jitterRate=-0.08056032657623291}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:51:33,432 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733709093399Initializing all the Stores at 1733709093400 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709093401 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709093401Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709093401Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709093401Cleaning up temporary data from old regions at 1733709093415 (+14 ms)Region opened successfully at 1733709093432 (+17 ms) 2024-12-09T01:51:33,432 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:51:33,432 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:51:33,432 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:51:33,432 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:51:33,432 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:51:33,433 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 
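Editor's note: the "(+N ms)" annotations inside the region open journal above are simply deltas between consecutive epoch-millisecond timestamps. A small sketch reproducing the final delta from the logged values; illustration only:

    // Hedged sketch: the "(+17 ms)" in the journal above is the gap between two logged timestamps.
    public class JournalDeltaSketch {
        public static void main(String[] args) {
            long cleanedUpTempData  = 1733709093415L; // "Cleaning up temporary data from old regions at ..."
            long openedSuccessfully = 1733709093432L; // "Region opened successfully at ..."
            System.out.println("(+" + (openedSuccessfully - cleanedUpTempData) + " ms)"); // (+17 ms)
        }
    }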
2024-12-09T01:51:33,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709093432Disabling compacts and flushes for region at 1733709093432Disabling writes for close at 1733709093432Writing region close event to WAL at 1733709093433 (+1 ms)Closed at 1733709093433 2024-12-09T01:51:33,435 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57125, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:51:33,435 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37609 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:33,435 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37609 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:33,435 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:33,435 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T01:51:33,436 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T01:51:33,441 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:51:33,442 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T01:51:33,443 DEBUG [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c 2024-12-09T01:51:33,444 DEBUG [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43413 2024-12-09T01:51:33,444 DEBUG [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:51:33,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:51:33,446 DEBUG [RS:0;ef6f18c58dc9:38509 {}] zookeeper.ZKUtil(111): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:33,446 WARN [RS:0;ef6f18c58dc9:38509 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
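Editor's note: the TransitRegionStateProcedure entries above (pid=2) drive hbase:meta from OFFLINE through OPENING to OPEN, recording each step in the RegionStateStore and in ZooKeeper. A hedged, purely illustrative sketch of that three-state progression; it is not the procedure framework itself:

    // Hedged sketch: the OFFLINE -> OPENING -> OPEN progression visible in the
    // RegionStateStore / MetaTableLocator entries later in this log.
    public class AssignStateSketch {
        enum RegionState { OFFLINE, OPENING, OPEN }

        public static void main(String[] args) {
            RegionState state = RegionState.OFFLINE;
            String server = "ef6f18c58dc9,38509,1733709093073";

            state = RegionState.OPENING; // master picks a server and dispatches OpenRegionProcedure
            System.out.println("regionState=" + state + ", regionLocation=" + server);

            state = RegionState.OPEN;    // region server finishes opening, master records openSeqNum
            System.out.println("regionState=" + state + ", openSeqNum=2, regionLocation=" + server);
        }
    }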
2024-12-09T01:51:33,446 INFO [RS:0;ef6f18c58dc9:38509 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:51:33,446 DEBUG [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:33,447 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,38509,1733709093073] 2024-12-09T01:51:33,451 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:51:33,456 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:51:33,456 INFO [RS:0;ef6f18c58dc9:38509 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:51:33,456 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,460 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:51:33,461 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:51:33,461 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,461 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,461 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,461 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,461 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,461 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,461 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:51:33,461 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,461 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,461 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T01:51:33,462 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,462 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,462 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:51:33,462 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:33,462 DEBUG [RS:0;ef6f18c58dc9:38509 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:51:33,463 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,463 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,463 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,463 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,463 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,464 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,38509,1733709093073-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:51:33,482 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:51:33,482 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,38509,1733709093073-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,482 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:33,482 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.Replication(171): ef6f18c58dc9,38509,1733709093073 started 2024-12-09T01:51:33,497 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
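Editor's note: the MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. A hedged sketch of the arithmetic, assuming the low mark is 95% of the global limit; the 0.95 factor is an assumption, only the 880 M and 836 M figures come from the log:

    // Hedged sketch: deriving the 836 M low-water mark from the 880 M global memstore limit
    // under an assumed 0.95 lower-limit fraction.
    public class MemStoreLimitSketch {
        public static void main(String[] args) {
            long globalLimitMb = 880;        // globalMemStoreLimit from the log
            double lowerLimitFactor = 0.95;  // assumption, not read from this log
            long lowMarkMb = (long) (globalLimitMb * lowerLimitFactor);
            System.out.println(lowMarkMb + " M"); // 836 M, matching globalMemStoreLimitLowMark
        }
    }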
2024-12-09T01:51:33,497 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,38509,1733709093073, RpcServer on ef6f18c58dc9/172.17.0.2:38509, sessionid=0x1007479114a0001 2024-12-09T01:51:33,498 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:51:33,498 DEBUG [RS:0;ef6f18c58dc9:38509 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:33,498 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,38509,1733709093073' 2024-12-09T01:51:33,498 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:51:33,498 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:51:33,499 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:51:33,499 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:51:33,499 DEBUG [RS:0;ef6f18c58dc9:38509 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:33,499 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,38509,1733709093073' 2024-12-09T01:51:33,499 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:51:33,499 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:51:33,500 DEBUG [RS:0;ef6f18c58dc9:38509 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:51:33,500 INFO [RS:0;ef6f18c58dc9:38509 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:51:33,500 INFO [RS:0;ef6f18c58dc9:38509 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T01:51:33,593 WARN [ef6f18c58dc9:37609 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
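Editor's note: the ZKWatcher entries throughout this log ("Received ZooKeeper Event, type=NodeCreated/NodeChildrenChanged/NodeDataChanged ...") are watch notifications delivered by the ZooKeeper client. A hedged sketch using the raw ZooKeeper client API rather than HBase's ZKWatcher; the quorum address and znode paths below are placeholders:

    // Hedged sketch (raw ZooKeeper client, not HBase's ZKWatcher): registering watches so that
    // events like the ones in this log are delivered to a callback.
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkWatchSketch {
        public static void main(String[] args) throws Exception {
            Watcher watcher = (WatchedEvent event) ->
                System.out.println("Received ZooKeeper Event, type=" + event.getType()
                    + ", state=" + event.getState() + ", path=" + event.getPath());
            ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, watcher);
            zk.exists("/hbase/running", true); // fires NodeCreated when the znode appears
            zk.getChildren("/hbase", true);    // fires NodeChildrenChanged when children change
            Thread.sleep(60_000);              // keep the session alive long enough to receive events
            zk.close();
        }
    }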
2024-12-09T01:51:33,603 INFO [RS:0;ef6f18c58dc9:38509 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C38509%2C1733709093073, suffix=, logDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073, archiveDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/oldWALs, maxLogs=32 2024-12-09T01:51:33,604 INFO [RS:0;ef6f18c58dc9:38509 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 2024-12-09T01:51:33,613 INFO [RS:0;ef6f18c58dc9:38509 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 2024-12-09T01:51:33,619 DEBUG [RS:0;ef6f18c58dc9:38509 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43403:43403),(127.0.0.1/127.0.0.1:46509:46509)] 2024-12-09T01:51:33,843 DEBUG [ef6f18c58dc9:37609 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T01:51:33,844 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:33,846 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,38509,1733709093073, state=OPENING 2024-12-09T01:51:33,847 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T01:51:33,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:33,850 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:51:33,850 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:33,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,38509,1733709093073}] 2024-12-09T01:51:33,851 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:33,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-09T01:51:34,004 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:51:34,009 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34661, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:51:34,015 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T01:51:34,015 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:51:34,017 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C38509%2C1733709093073.meta, suffix=.meta, logDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073, archiveDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/oldWALs, maxLogs=32 2024-12-09T01:51:34,018 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta 2024-12-09T01:51:34,027 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta 2024-12-09T01:51:34,036 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46509:46509),(127.0.0.1/127.0.0.1:43403:43403)] 2024-12-09T01:51:34,040 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:51:34,040 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T01:51:34,041 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T01:51:34,041 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
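Editor's note: both wal.AbstractFSWAL(613) configuration entries in this log report blocksize=256 MB and rollsize=128 MB, i.e. the WAL is rolled at half its HDFS block size. A hedged sketch of that relationship, assuming a 0.5 roll multiplier; only the 256 MB and 128 MB figures come from the log:

    // Hedged sketch: rollsize = blocksize * multiplier, with an assumed 0.5 multiplier,
    // reproducing the "blocksize=256 MB, rollsize=128 MB" WAL configuration entries.
    public class WalRollSizeSketch {
        public static void main(String[] args) {
            long blockSizeBytes = 256L * 1024 * 1024;
            double rollMultiplier = 0.5; // assumption (commonly hbase.regionserver.logroll.multiplier)
            long rollSizeBytes = (long) (blockSizeBytes * rollMultiplier);
            System.out.println(rollSizeBytes / (1024 * 1024) + " MB"); // 128 MB
        }
    }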
2024-12-09T01:51:34,041 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T01:51:34,041 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:34,041 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T01:51:34,041 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T01:51:34,044 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:51:34,045 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:51:34,045 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:34,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:34,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:51:34,047 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:51:34,048 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:34,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:34,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:51:34,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:51:34,050 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:34,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:51:34,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:51:34,052 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:51:34,052 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:34,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
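Editor's note: every CompactionConfiguration entry in this log repeats the same selection parameters: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2 (5.0 off-peak). A hedged sketch of the ratio test those parameters feed into, where a large store file is excluded from a minor compaction when it exceeds ratio times the total size of the smaller files; this shows the general idea, not the ExploringCompactionPolicy source:

    // Hedged sketch (not HBase's policy code): the ratio-based eligibility test controlled by the
    // "ratio 1.200000; off-peak ratio 5.000000" parameters logged above.
    import java.util.Arrays;

    public class RatioSelectionSketch {
        static int firstIncludedIndex(long[] sizesDescending, double ratio) {
            long remaining = Arrays.stream(sizesDescending).sum();
            for (int i = 0; i < sizesDescending.length; i++) {
                remaining -= sizesDescending[i];
                if (sizesDescending[i] <= ratio * remaining) {
                    return i; // this file and everything smaller is eligible
                }
            }
            return sizesDescending.length; // nothing eligible
        }
        public static void main(String[] args) {
            long[] sizes = {700_000, 60_000, 50_000, 40_000}; // bytes, sorted descending
            System.out.println(firstIncludedIndex(sizes, 1.2)); // 1: the 700 KB file is skipped at the peak ratio
            System.out.println(firstIncludedIndex(sizes, 5.0)); // 0: the off-peak ratio of 5.0 lets it in
        }
    }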
2024-12-09T01:51:34,053 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:51:34,054 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740 2024-12-09T01:51:34,056 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740 2024-12-09T01:51:34,058 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:51:34,058 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:51:34,059 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T01:51:34,060 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:51:34,061 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777798, jitterRate=-0.010979682207107544}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:51:34,061 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T01:51:34,062 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733709094041Writing region info on filesystem at 1733709094041Initializing all the Stores at 1733709094042 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709094042Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709094044 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709094044Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709094044Cleaning up temporary data from old regions at 1733709094058 (+14 ms)Running coprocessor post-open hooks at 1733709094061 (+3 ms)Region opened successfully at 1733709094062 (+1 ms) 2024-12-09T01:51:34,071 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733709094004 2024-12-09T01:51:34,074 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T01:51:34,075 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T01:51:34,075 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:34,077 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,38509,1733709093073, state=OPEN 2024-12-09T01:51:34,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:34,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:51:34,082 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:34,082 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:34,082 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:51:34,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T01:51:34,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,38509,1733709093073 in 232 msec 2024-12-09T01:51:34,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T01:51:34,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 651 msec 2024-12-09T01:51:34,090 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:51:34,090 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T01:51:34,092 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:51:34,092 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,38509,1733709093073, seqNum=-1] 2024-12-09T01:51:34,093 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:51:34,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50559, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:51:34,102 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 760 msec 2024-12-09T01:51:34,102 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733709094102, completionTime=-1 2024-12-09T01:51:34,103 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T01:51:34,103 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T01:51:34,105 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T01:51:34,105 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733709154105 2024-12-09T01:51:34,105 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733709214105 2024-12-09T01:51:34,105 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T01:51:34,106 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,37609,1733709092988-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:34,106 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,37609,1733709092988-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:34,106 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,37609,1733709092988-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:34,106 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef6f18c58dc9:37609, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T01:51:34,106 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:34,106 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:34,108 DEBUG [master/ef6f18c58dc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T01:51:34,111 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.995sec 2024-12-09T01:51:34,112 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T01:51:34,112 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T01:51:34,112 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T01:51:34,112 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T01:51:34,112 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T01:51:34,112 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,37609,1733709092988-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:51:34,112 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,37609,1733709092988-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T01:51:34,115 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T01:51:34,115 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T01:51:34,115 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,37609,1733709092988-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:51:34,126 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:34,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:34,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6144f613, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:51:34,196 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef6f18c58dc9,37609,-1 for getting cluster id 2024-12-09T01:51:34,197 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T01:51:34,199 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8da61aba-ed05-4c68-b174-441e350d322a' 2024-12-09T01:51:34,200 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T01:51:34,200 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8da61aba-ed05-4c68-b174-441e350d322a" 2024-12-09T01:51:34,200 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@756904d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:51:34,200 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef6f18c58dc9,37609,-1] 2024-12-09T01:51:34,201 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T01:51:34,201 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:34,203 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T01:51:34,204 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62cd82cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:51:34,205 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:51:34,206 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,38509,1733709093073, seqNum=-1] 2024-12-09T01:51:34,206 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:51:34,208 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40126, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:51:34,212 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:34,212 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:51:34,216 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T01:51:34,216 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-12-09T01:51:34,216 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-12-09T01:51:34,217 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T01:51:34,218 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:34,218 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@de603c 2024-12-09T01:51:34,218 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T01:51:34,224 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T01:51:34,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37609 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T01:51:34,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37609 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-09T01:51:34,226 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37609 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:51:34,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37609 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T01:51:34,229 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T01:51:34,229 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:34,229 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37609 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-12-09T01:51:34,231 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T01:51:34,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37609 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:34,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741835_1011 (size=395) 2024-12-09T01:51:34,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41489 is added to blk_1073741835_1011 (size=395) 2024-12-09T01:51:34,249 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 32afcd71342a11f90a484ea7fca2803d, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c 2024-12-09T01:51:34,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41489 is added to blk_1073741836_1012 (size=78) 2024-12-09T01:51:34,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43857 is added to blk_1073741836_1012 (size=78) 2024-12-09T01:51:34,257 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:34,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 32afcd71342a11f90a484ea7fca2803d, disabling compactions & flushes 2024-12-09T01:51:34,258 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:34,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:34,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. after waiting 0 ms 2024-12-09T01:51:34,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:34,258 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:34,258 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 32afcd71342a11f90a484ea7fca2803d: Waiting for close lock at 1733709094258Disabling compacts and flushes for region at 1733709094258Disabling writes for close at 1733709094258Writing region close event to WAL at 1733709094258Closed at 1733709094258 2024-12-09T01:51:34,259 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T01:51:34,260 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1733709094259"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733709094259"}]},"ts":"1733709094259"} 2024-12-09T01:51:34,263 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T01:51:34,264 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T01:51:34,264 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709094264"}]},"ts":"1733709094264"} 2024-12-09T01:51:34,266 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-12-09T01:51:34,266 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=32afcd71342a11f90a484ea7fca2803d, ASSIGN}] 2024-12-09T01:51:34,268 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=32afcd71342a11f90a484ea7fca2803d, ASSIGN 2024-12-09T01:51:34,269 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=32afcd71342a11f90a484ea7fca2803d, ASSIGN; state=OFFLINE, location=ef6f18c58dc9,38509,1733709093073; forceNewPlan=false, retain=false 2024-12-09T01:51:34,420 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=32afcd71342a11f90a484ea7fca2803d, regionState=OPENING, regionLocation=ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:34,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=32afcd71342a11f90a484ea7fca2803d, ASSIGN because future has completed 2024-12-09T01:51:34,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32afcd71342a11f90a484ea7fca2803d, server=ef6f18c58dc9,38509,1733709093073}] 2024-12-09T01:51:34,581 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 
2024-12-09T01:51:34,581 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 32afcd71342a11f90a484ea7fca2803d, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:51:34,582 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,582 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:51:34,582 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,582 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,583 INFO [StoreOpener-32afcd71342a11f90a484ea7fca2803d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,585 INFO [StoreOpener-32afcd71342a11f90a484ea7fca2803d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 32afcd71342a11f90a484ea7fca2803d columnFamilyName info 2024-12-09T01:51:34,585 DEBUG [StoreOpener-32afcd71342a11f90a484ea7fca2803d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:51:34,586 INFO [StoreOpener-32afcd71342a11f90a484ea7fca2803d-1 {}] regionserver.HStore(327): Store=32afcd71342a11f90a484ea7fca2803d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:51:34,586 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,586 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/default/TestLogRolling-testLogRollOnPipelineRestart/32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,587 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/default/TestLogRolling-testLogRollOnPipelineRestart/32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,587 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,587 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,589 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,591 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/default/TestLogRolling-testLogRollOnPipelineRestart/32afcd71342a11f90a484ea7fca2803d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:51:34,592 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 32afcd71342a11f90a484ea7fca2803d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872686, jitterRate=0.10967868566513062}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T01:51:34,592 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:34,592 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 32afcd71342a11f90a484ea7fca2803d: Running coprocessor pre-open hook at 1733709094582Writing region info on filesystem at 1733709094582Initializing all the Stores at 1733709094583 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709094583Cleaning up temporary data from old regions at 1733709094587 (+4 ms)Running coprocessor post-open hooks at 1733709094592 (+5 ms)Region opened successfully at 1733709094592 2024-12-09T01:51:34,593 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d., pid=6, masterSystemTime=1733709094577 2024-12-09T01:51:34,596 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:34,596 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:34,598 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=32afcd71342a11f90a484ea7fca2803d, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:34,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 32afcd71342a11f90a484ea7fca2803d, server=ef6f18c58dc9,38509,1733709093073 because future has completed 2024-12-09T01:51:34,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T01:51:34,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 32afcd71342a11f90a484ea7fca2803d, server=ef6f18c58dc9,38509,1733709093073 in 179 msec 2024-12-09T01:51:34,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T01:51:34,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=32afcd71342a11f90a484ea7fca2803d, ASSIGN in 338 msec 2024-12-09T01:51:34,608 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T01:51:34,608 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709094608"}]},"ts":"1733709094608"} 2024-12-09T01:51:34,610 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-12-09T01:51:34,611 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T01:51:34,614 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 385 msec 2024-12-09T01:51:34,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:35,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:35,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:35,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:36,127 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:36,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:36,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:37,128 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:37,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:37,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:38,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,081 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,082 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,086 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,088 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:38,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:38,592 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:51:38,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,616 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,618 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,623 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:51:38,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:39,129 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:39,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:39,451 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T01:51:39,452 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-12-09T01:51:39,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:40,130 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:40,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:40,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:41,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:41,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:41,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:42,131 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:42,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:42,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:43,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T01:51:43,060 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T01:51:43,061 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T01:51:43,061 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-12-09T01:51:43,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:51:43,062 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T01:51:43,062 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-09T01:51:43,062 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-09T01:51:43,132 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:43,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:43,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:44,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:44,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:44,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37609 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:51:44,285 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-12-09T01:51:44,285 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-12-09T01:51:44,288 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T01:51:44,288 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:44,292 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d., hostname=ef6f18c58dc9,38509,1733709093073, seqNum=2] 2024-12-09T01:51:44,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:45,133 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:45,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:45,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:46,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:46,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:46,294 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 2024-12-09T01:51:46,295 WARN [ResponseProcessor for block BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:41489,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:46,296 WARN [ResponseProcessor for block BP-2069266944-172.17.0.2-1733709091959:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2069266944-172.17.0.2-1733709091959:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T01:51:46,296 WARN [DataStreamer for file /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta block BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK], DatanodeInfoWithStorage[127.0.0.1:41489,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41489,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]) is bad. 2024-12-09T01:51:46,296 WARN [ResponseProcessor for block BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:41489,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:46,296 WARN [PacketResponder: BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41489] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:46,297 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523824117_22 at /127.0.0.1:46224 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41489:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46224 dst: /127.0.0.1:41489 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:46,297 WARN [PacketResponder: BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:41489] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:46,297 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-17092419_22 at /127.0.0.1:50558 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50558 dst: /127.0.0.1:43857 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:46,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-17092419_22 at /127.0.0.1:46200 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:41489:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46200 dst: /127.0.0.1:41489 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:46,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523824117_22 at /127.0.0.1:50588 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50588 dst: /127.0.0.1:43857 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:46,298 WARN [DataStreamer for file /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 block BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK], DatanodeInfoWithStorage[127.0.0.1:41489,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:41489,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]) is bad. 
2024-12-09T01:51:46,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49d720be{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:46,297 WARN [DataStreamer for file /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 block BP-2069266944-172.17.0.2-1733709091959:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2069266944-172.17.0.2-1733709091959:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:41489,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK], DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:41489,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]) is bad. 2024-12-09T01:51:46,299 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@37fa3e5a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:46,299 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:46,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23fcbb95{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:46,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7c7a3196{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:46,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523824117_22 at /127.0.0.1:46238 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:41489:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46238 dst: /127.0.0.1:41489 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:46,299 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523824117_22 at /127.0.0.1:50592 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50592 dst: /127.0.0.1:43857 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:46,301 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:46,301 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T01:51:46,301 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2069266944-172.17.0.2-1733709091959 (Datanode Uuid 0e7f2e3c-18d3-4a81-975d-83e740a7cd7b) service to localhost/127.0.0.1:43413 2024-12-09T01:51:46,301 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:46,302 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data4/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:46,303 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:46,304 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data3/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:46,321 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:46,325 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:46,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:46,326 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:46,326 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:51:46,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6565628b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:46,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70644e5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:46,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@235b3635{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/java.io.tmpdir/jetty-localhost-36227-hadoop-hdfs-3_4_1-tests_jar-_-any-8958090869898942819/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:46,453 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2786a051{HTTP/1.1, (http/1.1)}{localhost:36227} 2024-12-09T01:51:46,453 INFO [Time-limited test {}] server.Server(415): Started @169190ms 2024-12-09T01:51:46,455 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:46,477 WARN [ResponseProcessor for block BP-2069266944-172.17.0.2-1733709091959:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2069266944-172.17.0.2-1733709091959:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:46,476 WARN [ResponseProcessor for block BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:46,477 WARN [ResponseProcessor for block BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:46,477 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-17092419_22 at /127.0.0.1:44746 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44746 dst: /127.0.0.1:43857 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-09T01:51:46,477 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523824117_22 at /127.0.0.1:44726 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44726 dst: /127.0.0.1:43857 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:46,477 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523824117_22 at /127.0.0.1:44732 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44732 dst: /127.0.0.1:43857 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:46,484 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@354edf1e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:46,485 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2aa0e69d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:46,485 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:46,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@444db7a8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:46,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@29ec962a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:46,486 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:46,486 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:51:46,486 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2069266944-172.17.0.2-1733709091959 (Datanode Uuid c141504a-e754-4899-983b-758e64bcb62c) service to localhost/127.0.0.1:43413 2024-12-09T01:51:46,486 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:46,488 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data1/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:46,488 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data2/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:46,488 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:46,496 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:46,500 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:46,501 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:46,501 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:46,501 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:51:46,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dc5f7b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:46,502 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d692efe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:46,583 WARN [Thread-1355 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:51:46,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6a9c44e0f0329235 with lease ID 0xb83ed98518f626fb: from storage DS-298ad5c7-a290-48e8-8f58-473fad29767e node DatanodeRegistration(127.0.0.1:37749, datanodeUuid=0e7f2e3c-18d3-4a81-975d-83e740a7cd7b, infoPort=45273, infoSecurePort=0, ipcPort=37089, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T01:51:46,586 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6a9c44e0f0329235 with lease ID 0xb83ed98518f626fb: from storage DS-89fa7659-64fa-487f-a16f-fb331d050fc3 node DatanodeRegistration(127.0.0.1:37749, datanodeUuid=0e7f2e3c-18d3-4a81-975d-83e740a7cd7b, infoPort=45273, infoSecurePort=0, ipcPort=37089, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:46,659 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@673d3bba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/java.io.tmpdir/jetty-localhost-35193-hadoop-hdfs-3_4_1-tests_jar-_-any-12041284831041684371/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:46,659 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@14c76857{HTTP/1.1, (http/1.1)}{localhost:35193} 2024-12-09T01:51:46,659 INFO [Time-limited test {}] server.Server(415): Started @169397ms 2024-12-09T01:51:46,661 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:46,767 WARN [Thread-1386 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:51:46,769 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4beb78b9e14a49c8 with lease ID 0xb83ed98518f626fc: from storage DS-cedbe907-b73d-43f0-88eb-987ea205b350 node DatanodeRegistration(127.0.0.1:46201, datanodeUuid=c141504a-e754-4899-983b-758e64bcb62c, infoPort=40269, infoSecurePort=0, ipcPort=40183, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:46,769 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4beb78b9e14a49c8 with lease ID 0xb83ed98518f626fc: from storage DS-3c113729-ddd4-42ba-9884-35ce8b3e3ffa node DatanodeRegistration(127.0.0.1:46201, datanodeUuid=c141504a-e754-4899-983b-758e64bcb62c, infoPort=40269, infoSecurePort=0, ipcPort=40183, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:46,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:47,134 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:47,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:47,698 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-12-09T01:51:47,700 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-12-09T01:51:47,702 ERROR [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c-prefix:ef6f18c58dc9,38509,1733709093073 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:47,702 WARN [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c-prefix:ef6f18c58dc9,38509,1733709093073 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
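The repeated "Failed invocation ... InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings above come from calling isFileClosed reflectively against a client that has already been shut down, so the underlying IOException surfaces wrapped in InvocationTargetException. A minimal, self-contained sketch of just that failure shape, with hypothetical class and path names rather than the real HDFS client:

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Hypothetical stand-in for a closed filesystem: isFileClosed(...) is looked up
// and invoked reflectively, and the IOException thrown by the closed client
// surfaces wrapped in InvocationTargetException -- the pattern the WARN lines record.
public class ClosedFsReflectionSketch {

  static class ClosedFs {
    // Mimics a client whose checkOpen() fails once the filesystem is closed.
    public boolean isFileClosed(String path) throws IOException {
      throw new IOException("Filesystem closed");
    }
  }

  public static void main(String[] args) throws Exception {
    ClosedFs fs = new ClosedFs();
    Method isFileClosed = ClosedFs.class.getMethod("isFileClosed", String.class);
    try {
      isFileClosed.invoke(fs, "/WALs/example-regionserver/example.wal");
    } catch (InvocationTargetException e) {
      // A retry loop around this call would log it as "Failed invocation" and try again.
      System.out.println("Failed invocation: " + e.getCause());
    }
  }
}
```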
2024-12-09T01:51:47,702 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C38509%2C1733709093073:(num 1733709093604) roll requested 2024-12-09T01:51:47,702 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 2024-12-09T01:51:47,709 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 newFile=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 2024-12-09T01:51:47,709 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:47,709 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:47,709 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:47,709 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:47,709 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:47,710 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 2024-12-09T01:51:47,710 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:47,710 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
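The roll sequence recorded just above (roll requested, new writer created, old writer closed in the background, and lease recovery started when that close fails) can be summarised by a small sketch. Everything here is a hypothetical outline, not the actual AbstractFSWAL implementation:

```java
import java.io.IOException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Hypothetical outline of the roll-and-close-old-writer flow visible in the log:
// appends switch to the new writer immediately, the old writer is closed
// asynchronously, and a failed close (e.g. all datanodes in its pipeline bad)
// falls back to lease recovery on the old WAL file.
public class WalRollSketch {

  interface Writer { void close() throws IOException; }

  private final ExecutorService closeExecutor = Executors.newSingleThreadExecutor();
  private volatile Writer current;

  void roll(Writer newWriter) {
    Writer old = current;
    current = newWriter;                  // new appends go to the new file from here on
    if (old == null) {
      return;
    }
    closeExecutor.execute(() -> {
      try {
        old.close();                      // write trailer + close; may fail
      } catch (IOException e) {
        System.out.println("close old writer failed, recovering lease: " + e.getMessage());
        recoverLease();                   // mirrors "Recover lease on dfs file ..."
      }
    });
  }

  void recoverLease() {
    // placeholder for the retry loop sketched further down in this section
  }

  public static void main(String[] args) {
    WalRollSketch wal = new WalRollSketch();
    wal.roll(() -> { throw new IOException("All datanodes are bad. Aborting..."); });
    wal.roll(() -> System.out.println("old writer closed cleanly"));
    wal.closeExecutor.shutdown();
  }
}
```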
2024-12-09T01:51:47,710 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 2024-12-09T01:51:47,710 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40269:40269),(127.0.0.1/127.0.0.1:45273:45273)] 2024-12-09T01:51:47,711 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 is not closed yet, will try archiving it next time 2024-12-09T01:51:47,711 WARN [IPC Server handler 1 on default port 43413 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-12-09T01:51:47,711 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 after 1ms 2024-12-09T01:51:47,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46201 is added to blk_1073741833_1017 (size=1632) 2024-12-09T01:51:47,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:48,135 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:48,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:48,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:49,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:49,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:49,714 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-12-09T01:51:49,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:50,136 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:50,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:50,585 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
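The lease-recovery lines in this stretch follow a simple retry pattern: attempt 0 fails almost immediately ("Failed to recover lease, attempt=0 ... after 1ms"), the caller waits, and a later attempt succeeds once the NameNode has finished recovery ("Recovered lease, attempt=1 ... after 4001ms" below). A hedged, self-contained sketch of that loop; recoverLease() here is a stand-in, not the real DistributedFileSystem call:

```java
import java.io.IOException;

// Hypothetical bounded retry loop matching the attempt=N / "after Xms" pattern in the log.
public class LeaseRecoveryRetrySketch {

  interface LeaseRecoverable { boolean recoverLease(String path) throws IOException; }

  static boolean recoverWithRetries(LeaseRecoverable fs, String path,
                                    int maxAttempts, long pauseMs) throws IOException {
    long start = System.currentTimeMillis();
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      if (fs.recoverLease(path)) {
        System.out.printf("Recovered lease, attempt=%d on file=%s after %dms%n",
            attempt, path, System.currentTimeMillis() - start);
        return true;
      }
      System.out.printf("Failed to recover lease, attempt=%d on file=%s after %dms%n",
          attempt, path, System.currentTimeMillis() - start);
      try {
        Thread.sleep(pauseMs);            // give the NameNode time to finish recovery
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        return false;
      }
    }
    return false;
  }

  public static void main(String[] args) throws IOException {
    final int[] calls = {0};
    LeaseRecoverable fake = p -> calls[0]++ > 0;   // fails on attempt 0, succeeds next
    recoverWithRetries(fake, "/WALs/example.wal", 5, 100);
  }
}
```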
2024-12-09T01:51:50,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:51,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:51,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:51,712 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 after 4001ms 2024-12-09T01:51:51,717 WARN [ResponseProcessor for block BP-2069266944-172.17.0.2-1733709091959:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2069266944-172.17.0.2-1733709091959:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:51,718 WARN [DataStreamer for file /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 block BP-2069266944-172.17.0.2-1733709091959:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2069266944-172.17.0.2-1733709091959:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46201,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK], DatanodeInfoWithStorage[127.0.0.1:37749,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46201,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]) is bad. 2024-12-09T01:51:51,718 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523824117_22 at /127.0.0.1:48098 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:46201:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48098 dst: /127.0.0.1:46201 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:51,718 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523824117_22 at /127.0.0.1:51436 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37749:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51436 dst: /127.0.0.1:37749 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
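The DataStreamer messages around here show the client side of a broken write pipeline: the ResponseProcessor hits an EOF on the ack stream, the node that failed to ack is declared bad, and the pipeline is rebuilt from the survivors, or the write is aborted when none remain (the "All datanodes ... are bad. Aborting..." error earlier). A hedged model of that decision with made-up types, not the real DataStreamer:

```java
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

// Hypothetical model of pipeline recovery: drop the datanode that failed,
// continue on the remaining replicas, or abort when the pipeline would be empty.
public class PipelineRecoverySketch {

  static List<String> recoverPipeline(List<String> pipeline, int badIndex) throws IOException {
    List<String> survivors = new ArrayList<>(pipeline);
    String bad = survivors.remove(badIndex);
    System.out.println("Error Recovery: datanode " + badIndex + " (" + bad + ") is bad.");
    if (survivors.isEmpty()) {
      throw new IOException("All datanodes " + pipeline + " are bad. Aborting...");
    }
    return survivors;                     // writes continue on the remaining replicas
  }

  public static void main(String[] args) throws Exception {
    List<String> pipeline = List.of("127.0.0.1:46201", "127.0.0.1:37749");
    System.out.println("new pipeline: " + recoverPipeline(pipeline, 0));
  }
}
```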
2024-12-09T01:51:51,720 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@673d3bba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:51,720 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@14c76857{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:51,720 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:51,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d692efe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:51,721 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dc5f7b3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:51,723 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:51,723 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T01:51:51,723 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2069266944-172.17.0.2-1733709091959 (Datanode Uuid c141504a-e754-4899-983b-758e64bcb62c) service to localhost/127.0.0.1:43413 2024-12-09T01:51:51,723 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:51,723 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data1/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:51,724 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data2/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:51,724 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:51,734 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:51,738 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:51,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:51,741 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:51,741 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:51:51,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75f58649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:51,742 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@571de0fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:51,857 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@45604664{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/java.io.tmpdir/jetty-localhost-37855-hadoop-hdfs-3_4_1-tests_jar-_-any-11895319638297021621/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:51,858 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6e793ffb{HTTP/1.1, (http/1.1)}{localhost:37855} 2024-12-09T01:51:51,858 INFO [Time-limited test {}] server.Server(415): Started @174595ms 2024-12-09T01:51:51,859 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:51,884 WARN [ResponseProcessor for block BP-2069266944-172.17.0.2-1733709091959:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2069266944-172.17.0.2-1733709091959:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:51,885 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_523824117_22 at /127.0.0.1:51448 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:37749:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51448 dst: /127.0.0.1:37749 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:51,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@235b3635{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:51,893 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2786a051{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:51:51,893 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:51:51,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70644e5e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:51:51,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6565628b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,STOPPED} 2024-12-09T01:51:51,895 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:51:51,895 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
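Between these shutdown messages and the earlier "Data Nodes restarted" / "Validated row row1002" and "Validated row row1003" lines, the test is bouncing the datanodes and then re-reading a known row to confirm the region server's WAL survived the restart. A hedged sketch of that test shape; the cluster and table types are placeholders, not the real MiniDFSCluster or HBase client APIs:

```java
// Hypothetical shape of the restart-and-validate step the log records:
// stop the datanodes, bring them back, wait for the cluster, then read a row
// to prove the write path recovered after the pipeline bounce.
public class RestartDataNodesSketch {

  interface MiniCluster {
    void stopDataNodes() throws Exception;
    void startDataNodes() throws Exception;
    void waitActive() throws Exception;
  }

  interface Table {
    byte[] get(String row) throws Exception;
  }

  static void restartAndValidate(MiniCluster cluster, Table table, String row) throws Exception {
    cluster.stopDataNodes();
    cluster.startDataNodes();
    cluster.waitActive();                         // corresponds to "Data Nodes restarted"
    byte[] value = table.get(row);
    if (value == null) {
      throw new AssertionError("row " + row + " lost after datanode restart");
    }
    System.out.println("Validated row " + row);   // corresponds to "Validated row rowN"
  }
}
```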
2024-12-09T01:51:51,895 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2069266944-172.17.0.2-1733709091959 (Datanode Uuid 0e7f2e3c-18d3-4a81-975d-83e740a7cd7b) service to localhost/127.0.0.1:43413 2024-12-09T01:51:51,895 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:51:51,895 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data3/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:51,896 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data4/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:51:51,896 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:51:51,909 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:51:51,912 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:51:51,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:51:51,913 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:51:51,913 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:51:51,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ede944f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:51:51,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17312068{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:51:51,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:51,966 WARN [Thread-1429 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:51:51,971 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa80625bb00de193d with lease ID 0xb83ed98518f626fd: from storage DS-cedbe907-b73d-43f0-88eb-987ea205b350 node DatanodeRegistration(127.0.0.1:46857, datanodeUuid=c141504a-e754-4899-983b-758e64bcb62c, infoPort=39963, infoSecurePort=0, ipcPort=38535, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:51,971 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa80625bb00de193d with lease ID 0xb83ed98518f626fd: from storage DS-3c113729-ddd4-42ba-9884-35ce8b3e3ffa node DatanodeRegistration(127.0.0.1:46857, datanodeUuid=c141504a-e754-4899-983b-758e64bcb62c, infoPort=39963, infoSecurePort=0, ipcPort=38535, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:52,053 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7ff0f915{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/java.io.tmpdir/jetty-localhost-45155-hadoop-hdfs-3_4_1-tests_jar-_-any-2832403469616538182/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:51:52,053 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58a9274b{HTTP/1.1, (http/1.1)}{localhost:45155} 2024-12-09T01:51:52,053 INFO [Time-limited test {}] server.Server(415): Started @174791ms 2024-12-09T01:51:52,055 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:51:52,137 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:52,197 WARN [Thread-1460 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T01:51:52,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:52,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6403782ae802685 with lease ID 0xb83ed98518f626fe: from storage DS-298ad5c7-a290-48e8-8f58-473fad29767e node DatanodeRegistration(127.0.0.1:35007, datanodeUuid=0e7f2e3c-18d3-4a81-975d-83e740a7cd7b, infoPort=33745, infoSecurePort=0, ipcPort=46081, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:52,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6403782ae802685 with lease ID 0xb83ed98518f626fe: from storage DS-89fa7659-64fa-487f-a16f-fb331d050fc3 node DatanodeRegistration(127.0.0.1:35007, datanodeUuid=0e7f2e3c-18d3-4a81-975d-83e740a7cd7b, infoPort=33745, infoSecurePort=0, ipcPort=46081, storageInfo=lv=-57;cid=testClusterID;nsid=1938261524;c=1733709091959), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:51:52,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:53,103 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-12-09T01:51:53,105 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-12-09T01:51:53,106 ERROR [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c-prefix:ef6f18c58dc9,38509,1733709093073 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37749,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:53,106 WARN [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c-prefix:ef6f18c58dc9,38509,1733709093073 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37749,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T01:51:53,106 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C38509%2C1733709093073:(num 1733709107702) roll requested 2024-12-09T01:51:53,107 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 2024-12-09T01:51:53,112 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 newFile=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 2024-12-09T01:51:53,112 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:53,112 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:53,113 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:53,113 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:53,113 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:53,113 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 2024-12-09T01:51:53,113 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37749,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:53,113 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37749,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
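The recurring "Failed invocation ... Filesystem closed" traces come from RecoverLeaseFSUtils.recoverFileLease, which asks the namenode to recover the lease on the old WAL and then polls isFileClosed (the reflective call in the traces) until the file closes or the attempt times out; here the polls fail because the owning DFSClient has already been shut down. Below is a simplified sketch of that recover-then-poll pattern against DistributedFileSystem directly, assuming a flat one-second pause; the real utility adds backoff, configurable timeouts, and reflection for cross-version compatibility.

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class LeaseRecoverySketch {
  /**
   * Recover the lease on a WAL file and wait (up to timeoutMs) for it to be closed.
   * Simplified stand-in for RecoverLeaseFSUtils.recoverFileLease; the real code adds
   * backoff, configurable pauses, and reflection so it can run against older HDFS.
   */
  static boolean recoverAndWait(DistributedFileSystem dfs, Path wal, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease returns true once the namenode considers the file closed.
      if (dfs.recoverLease(wal)) {
        return true;
      }
      // Otherwise poll isFileClosed -- the call that fails with "Filesystem closed"
      // in the traces above when the underlying DFSClient has already been shut down.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      Thread.sleep(1000L);
    }
    return false;
  }
}
```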
2024-12-09T01:51:53,113 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 2024-12-09T01:51:53,114 WARN [IPC Server handler 0 on default port 43413 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-12-09T01:51:53,114 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 after 1ms 2024-12-09T01:51:53,115 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39963:39963),(127.0.0.1/127.0.0.1:33745:33745)] 2024-12-09T01:51:53,115 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 is not closed yet, will try archiving it next time 2024-12-09T01:51:53,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:53,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:53,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:53,970 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
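Further below the test re-opens each rolled WAL and dumps its edits (the "Reading WAL ..." and "#N: [row100x/info:/.../Put/...]" lines). A minimal sketch of such a read-back loop using the HBase 2.x-style WALFactory reader API follows; the factory id is hypothetical, and newer HBase lines replace WAL.Reader with stream-reader variants.

```java
// Sketch of the read-back step, using the HBase 2.x-style WALFactory reader API.
// The factory id is hypothetical; newer HBase lines expose stream readers instead.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

final class WalReadBackSketch {
  /** Prints each edit in a rolled WAL file, mirroring the "#N: [...]" lines below. */
  static void dumpWal(Configuration conf, Path walFile) throws Exception {
    FileSystem fs = walFile.getFileSystem(conf);
    WALFactory walFactory = new WALFactory(conf, "wal-read-back-sketch");
    try (WAL.Reader reader = walFactory.createReader(fs, walFile)) {
      int n = 0;
      for (WAL.Entry entry = reader.next(); entry != null; entry = reader.next()) {
        // Each entry pairs a WALKey (region, sequence id) with the cells of one edit.
        System.out.println("#" + (n++) + ": " + entry.getEdit().getCells());
      }
    } finally {
      walFactory.close();
    }
  }
}
```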
2024-12-09T01:51:54,138 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:54,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:54,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:55,117 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 2024-12-09T01:51:55,123 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 newFile=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 2024-12-09T01:51:55,123 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:55,123 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:55,124 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:55,124 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:55,124 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:55,124 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 2024-12-09T01:51:55,125 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39963:39963),(127.0.0.1/127.0.0.1:33745:33745)] 2024-12-09T01:51:55,125 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 is not closed yet, will try archiving it next time 2024-12-09T01:51:55,125 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 is not closed yet, will try archiving it next time 2024-12-09T01:51:55,125 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 2024-12-09T01:51:55,125 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 2024-12-09T01:51:55,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741838_1019 (size=1264) 2024-12-09T01:51:55,126 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 after 1ms 2024-12-09T01:51:55,126 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 2024-12-09T01:51:55,126 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741838_1019 (size=1264) 2024-12-09T01:51:55,128 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 is not closed yet, will try archiving it next time 2024-12-09T01:51:55,135 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1733709094592/Put/vlen=218/seqid=0] 2024-12-09T01:51:55,135 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1733709104293/Put/vlen=1045/seqid=0] 2024-12-09T01:51:55,136 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709093604 2024-12-09T01:51:55,136 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 2024-12-09T01:51:55,136 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 2024-12-09T01:51:55,136 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 after 0ms 2024-12-09T01:51:55,136 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 2024-12-09T01:51:55,139 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:55,139 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1733709107702/Put/vlen=1045/seqid=0] 2024-12-09T01:51:55,139 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1733709109715/Put/vlen=1045/seqid=0] 2024-12-09T01:51:55,140 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 2024-12-09T01:51:55,140 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 2024-12-09T01:51:55,140 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 2024-12-09T01:51:55,140 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 after 0ms 2024-12-09T01:51:55,140 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709113106 2024-12-09T01:51:55,143 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1733709113106/Put/vlen=1045/seqid=0] 2024-12-09T01:51:55,143 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 2024-12-09T01:51:55,143 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 2024-12-09T01:51:55,143 WARN [IPC Server handler 3 on default port 43413 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 has not been closed. Lease recovery is in progress. 
RecoveryId = 1022 for block blk_1073741839_1021 2024-12-09T01:51:55,144 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 after 1ms 2024-12-09T01:51:55,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:55,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:55,971 WARN [ResponseProcessor for block BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:55,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-17092419_22 at /127.0.0.1:48298 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46857:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48298 dst: /127.0.0.1:46857 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:46857 remote=/127.0.0.1:48298]. Total timeout mills is 60000, 59151 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:55,972 WARN [DataStreamer for file /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 block BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK], DatanodeInfoWithStorage[127.0.0.1:35007,DS-298ad5c7-a290-48e8-8f58-473fad29767e,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]) is bad. 2024-12-09T01:51:55,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-17092419_22 at /127.0.0.1:46856 [Receiving block BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:35007:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46856 dst: /127.0.0.1:35007 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:51:55,972 WARN [DataStreamer for file /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 block BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:55,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741839_1022 (size=85) 2024-12-09T01:51:55,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741839_1022 (size=85) 2024-12-09T01:51:56,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:56,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:56,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:57,115 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709107702 after 4002ms 2024-12-09T01:51:57,140 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:57,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:57,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:58,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:58,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:58,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:51:59,141 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:59,144 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 after 4001ms 2024-12-09T01:51:59,144 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 2024-12-09T01:51:59,148 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 2024-12-09T01:51:59,149 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 32afcd71342a11f90a484ea7fca2803d 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-12-09T01:51:59,149 ERROR [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c-prefix:ef6f18c58dc9,38509,1733709093073 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:59,149 WARN [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c-prefix:ef6f18c58dc9,38509,1733709093073 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:59,150 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C38509%2C1733709093073:(num 1733709115116) roll requested 2024-12-09T01:51:59,150 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C38509%2C1733709093073.1733709119150 2024-12-09T01:51:59,155 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 newFile=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709119150 2024-12-09T01:51:59,155 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,155 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,155 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,155 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,155 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,155 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709119150 2024-12-09T01:51:59,155 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:59,156 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-2069266944-172.17.0.2-1733709091959:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy45.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy46.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor102.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
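The entries below show the roll falling back to HDFS lease recovery on the old WAL file once pipeline recovery fails (util.RecoverLeaseFSUtils "Recover lease on dfs file ..."). A minimal sketch of that recover-then-poll pattern, using only the DistributedFileSystem.recoverLease(Path) and isFileClosed(Path) calls visible in the stack traces; the URI, timeout, and poll interval are illustrative placeholders, not the values HBase uses:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Illustrative sketch only: recover the lease on a WAL file that a failed
// writer still holds, then poll until the NameNode reports the file closed.
public final class LeaseRecoverySketch {
  public static boolean recoverAndWait(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pollMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // recoverLease() returns true if the file is already closed or the lease
    // could be recovered immediately; otherwise recovery continues asynchronously.
    boolean closed = dfs.recoverLease(wal);
    while (!closed && System.currentTimeMillis() < deadline) {
      Thread.sleep(pollMs);
      // isFileClosed() asks the NameNode whether block recovery has finished.
      closed = dfs.isFileClosed(wal);
    }
    return closed;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI and WAL path for illustration only.
    try (DistributedFileSystem dfs =
        (DistributedFileSystem) new Path("hdfs://localhost:8020/").getFileSystem(conf)) {
      boolean ok = recoverAndWait(dfs, new Path("/example/wal-file"), 60_000L, 1_000L);
      System.out.println("lease recovered and file closed: " + ok);
    }
  }
}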
2024-12-09T01:51:59,156 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 2024-12-09T01:51:59,156 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 after 0ms 2024-12-09T01:51:59,157 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33745:33745),(127.0.0.1/127.0.0.1:39963:39963)] 2024-12-09T01:51:59,157 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 to hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/oldWALs/ef6f18c58dc9%2C38509%2C1733709093073.1733709115116 2024-12-09T01:51:59,172 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/default/TestLogRolling-testLogRollOnPipelineRestart/32afcd71342a11f90a484ea7fca2803d/.tmp/info/5eb3c65f5c444f7db6383d955f37240f is 1080, key is row1002/info:/1733709104293/Put/seqid=0 2024-12-09T01:51:59,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741841_1024 (size=9270) 2024-12-09T01:51:59,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741841_1024 (size=9270) 2024-12-09T01:51:59,177 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/default/TestLogRolling-testLogRollOnPipelineRestart/32afcd71342a11f90a484ea7fca2803d/.tmp/info/5eb3c65f5c444f7db6383d955f37240f 2024-12-09T01:51:59,185 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/default/TestLogRolling-testLogRollOnPipelineRestart/32afcd71342a11f90a484ea7fca2803d/.tmp/info/5eb3c65f5c444f7db6383d955f37240f as hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/default/TestLogRolling-testLogRollOnPipelineRestart/32afcd71342a11f90a484ea7fca2803d/info/5eb3c65f5c444f7db6383d955f37240f 2024-12-09T01:51:59,191 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/default/TestLogRolling-testLogRollOnPipelineRestart/32afcd71342a11f90a484ea7fca2803d/info/5eb3c65f5c444f7db6383d955f37240f, entries=4, sequenceid=8, filesize=9.1 K 2024-12-09T01:51:59,192 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 32afcd71342a11f90a484ea7fca2803d in 44ms, sequenceid=8, compaction requested=false 2024-12-09T01:51:59,192 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
32afcd71342a11f90a484ea7fca2803d: 2024-12-09T01:51:59,192 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-12-09T01:51:59,193 ERROR [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c-prefix:ef6f18c58dc9,38509,1733709093073.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:59,193 WARN [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c-prefix:ef6f18c58dc9,38509,1733709093073.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:59,193 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C38509%2C1733709093073.meta:.meta(num 1733709094018) roll requested 2024-12-09T01:51:59,193 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709119193.meta 2024-12-09T01:51:59,198 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,198 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,198 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,198 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,198 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,198 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709119193.meta 2024-12-09T01:51:59,199 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:59,199 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:51:59,199 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta 2024-12-09T01:51:59,199 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39963:39963),(127.0.0.1/127.0.0.1:33745:33745)] 2024-12-09T01:51:59,199 DEBUG [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta is not closed yet, will try archiving it next time 2024-12-09T01:51:59,199 WARN [IPC Server handler 1 on default port 43413 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013 2024-12-09T01:51:59,200 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta after 1ms 2024-12-09T01:51:59,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:51:59,221 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/.tmp/info/c1ef329804614146b05a17f35d38b322 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d./info:regioninfo/1733709094597/Put/seqid=0 2024-12-09T01:51:59,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741843_1027 (size=7125) 2024-12-09T01:51:59,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741843_1027 (size=7125) 2024-12-09T01:51:59,227 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/.tmp/info/c1ef329804614146b05a17f35d38b322 2024-12-09T01:51:59,253 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/.tmp/ns/e3b663ee8d384484a46dfc4da7f595ec is 43, key is default/ns:d/1733709094095/Put/seqid=0 2024-12-09T01:51:59,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741844_1028 (size=5153) 2024-12-09T01:51:59,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741844_1028 (size=5153) 2024-12-09T01:51:59,258 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/.tmp/ns/e3b663ee8d384484a46dfc4da7f595ec 2024-12-09T01:51:59,278 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/.tmp/table/bf48a1dab35742f3bb87fde4b5fb4c66 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1733709094608/Put/seqid=0 2024-12-09T01:51:59,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741845_1029 (size=5438) 2024-12-09T01:51:59,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741845_1029 (size=5438) 2024-12-09T01:51:59,283 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/.tmp/table/bf48a1dab35742f3bb87fde4b5fb4c66 2024-12-09T01:51:59,288 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/.tmp/info/c1ef329804614146b05a17f35d38b322 as hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/info/c1ef329804614146b05a17f35d38b322 2024-12-09T01:51:59,293 INFO [Time-limited test {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/info/c1ef329804614146b05a17f35d38b322, entries=10, sequenceid=11, filesize=7.0 K 2024-12-09T01:51:59,294 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/.tmp/ns/e3b663ee8d384484a46dfc4da7f595ec as hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/ns/e3b663ee8d384484a46dfc4da7f595ec 2024-12-09T01:51:59,299 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/ns/e3b663ee8d384484a46dfc4da7f595ec, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T01:51:59,300 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/.tmp/table/bf48a1dab35742f3bb87fde4b5fb4c66 as hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/table/bf48a1dab35742f3bb87fde4b5fb4c66 2024-12-09T01:51:59,304 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/table/bf48a1dab35742f3bb87fde4b5fb4c66, entries=2, sequenceid=11, filesize=5.3 K 2024-12-09T01:51:59,305 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false 2024-12-09T01:51:59,306 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T01:51:59,310 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T01:51:59,311 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
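The flush entries above write each new store file under the region's .tmp directory and then commit it by renaming it into the column family directory ("Committing ... .tmp/info/... as ... /info/..."). A simplified sketch of that write-to-.tmp-then-rename pattern against a Hadoop FileSystem; the paths and payload are made up for illustration, and this is not HBase's HRegionFileSystem code:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Simplified illustration of the write-to-.tmp-then-rename commit pattern
// seen in the flush log entries above. Paths and contents are placeholders.
public final class TmpThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/example/region/.tmp/info/flush-0001");
    Path committed = new Path("/example/region/info/flush-0001");

    // 1. Write the new file under .tmp so readers never see a partial file.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Move it into the live store directory in a single rename.
    fs.mkdirs(committed.getParent());
    if (!fs.rename(tmp, committed)) {
      throw new IOException("commit rename failed for " + tmp);
    }
  }
}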
2024-12-09T01:51:59,311 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:59,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:59,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:59,311 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
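The DEBUG "Call stack:" entry above records where the connection close was requested from by capturing the current thread's stack trace. A tiny self-contained sketch of that logging pattern (not the actual AsyncConnectionImpl implementation):

// Minimal sketch of logging the caller's stack trace, as the
// "Call stack:" DEBUG entry above does when the connection is closed.
public final class CallStackLogSketch {
  static void logCallStack(String reason) {
    StringBuilder sb = new StringBuilder("Closed (" + reason + "). Call stack:");
    StackTraceElement[] frames = Thread.currentThread().getStackTrace();
    // Skip frame 0 (getStackTrace) and frame 1 (this method) for readability.
    for (int i = 2; i < frames.length; i++) {
      sb.append("\n  at ").append(frames[i]);
    }
    System.out.println(sb); // a real implementation would emit this at DEBUG level
  }

  public static void main(String[] args) {
    logCallStack("example shutdown");
  }
}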
2024-12-09T01:51:59,311 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T01:51:59,311 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1643342374, stopped=false 2024-12-09T01:51:59,311 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef6f18c58dc9,37609,1733709092988 2024-12-09T01:51:59,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:59,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:51:59,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:59,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:51:59,313 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:51:59,313 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T01:51:59,313 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:59,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:59,313 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,38509,1733709093073' ***** 2024-12-09T01:51:59,314 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:51:59,314 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:59,314 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:51:59,314 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:51:59,314 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:51:59,314 INFO [RS:0;ef6f18c58dc9:38509 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:51:59,314 INFO [RS:0;ef6f18c58dc9:38509 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:51:59,315 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(3091): Received CLOSE for 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:59,315 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,38509,1733709093073 2024-12-09T01:51:59,315 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:51:59,315 INFO [RS:0;ef6f18c58dc9:38509 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef6f18c58dc9:38509. 
2024-12-09T01:51:59,315 DEBUG [RS:0;ef6f18c58dc9:38509 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:51:59,315 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 32afcd71342a11f90a484ea7fca2803d, disabling compactions & flushes 2024-12-09T01:51:59,315 DEBUG [RS:0;ef6f18c58dc9:38509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:51:59,315 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:59,315 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:59,315 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:51:59,315 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:51:59,315 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. after waiting 0 ms 2024-12-09T01:51:59,315 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:51:59,315 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 
2024-12-09T01:51:59,315 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T01:51:59,316 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T01:51:59,316 DEBUG [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(1325): Online Regions={32afcd71342a11f90a484ea7fca2803d=TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d., 1588230740=hbase:meta,,1.1588230740} 2024-12-09T01:51:59,316 DEBUG [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 32afcd71342a11f90a484ea7fca2803d 2024-12-09T01:51:59,316 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:51:59,316 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:51:59,316 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:51:59,316 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:51:59,316 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:51:59,320 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/default/TestLogRolling-testLogRollOnPipelineRestart/32afcd71342a11f90a484ea7fca2803d/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-12-09T01:51:59,321 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T01:51:59,321 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 2024-12-09T01:51:59,321 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 32afcd71342a11f90a484ea7fca2803d: Waiting for close lock at 1733709119315Running coprocessor pre-close hooks at 1733709119315Disabling compacts and flushes for region at 1733709119315Disabling writes for close at 1733709119315Writing region close event to WAL at 1733709119316 (+1 ms)Running coprocessor post-close hooks at 1733709119321 (+5 ms)Closed at 1733709119321 2024-12-09T01:51:59,321 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1733709094225.32afcd71342a11f90a484ea7fca2803d. 
2024-12-09T01:51:59,321 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:51:59,321 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:59,321 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709119316Running coprocessor pre-close hooks at 1733709119316Disabling compacts and flushes for region at 1733709119316Disabling writes for close at 1733709119316Writing region close event to WAL at 1733709119317 (+1 ms)Running coprocessor post-close hooks at 1733709119321 (+4 ms)Closed at 1733709119321 2024-12-09T01:51:59,321 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T01:51:59,464 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T01:51:59,464 INFO [regionserver/ef6f18c58dc9:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T01:51:59,469 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:51:59,516 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,38509,1733709093073; all regions closed. 2024-12-09T01:51:59,516 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,517 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,517 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,517 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,517 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:51:59,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741842_1025 (size=825) 2024-12-09T01:51:59,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741842_1025 (size=825) 2024-12-09T01:51:59,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:00,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:00,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:00,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:01,142 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:01,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:01,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:02,143 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:02,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:02,961 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T01:52:02,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:02,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 after 68063ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:52:03,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:52:03,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T01:52:03,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-12-09T01:52:03,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:03,200 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-12-09T01:52:03,200 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta after 4001ms 2024-12-09T01:52:03,201 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/WALs/ef6f18c58dc9,38509,1733709093073/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta to hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/oldWALs/ef6f18c58dc9%2C38509%2C1733709093073.meta.1733709094018.meta 2024-12-09T01:52:03,203 DEBUG [RS:0;ef6f18c58dc9:38509 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/oldWALs 2024-12-09T01:52:03,203 INFO [RS:0;ef6f18c58dc9:38509 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C38509%2C1733709093073.meta:.meta(num 1733709119193) 2024-12-09T01:52:03,204 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,204 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,204 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,204 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,204 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:03,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741840_1023 (size=1162) 2024-12-09T01:52:03,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741840_1023 (size=1162) 2024-12-09T01:52:03,210 DEBUG [RS:0;ef6f18c58dc9:38509 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/oldWALs 2024-12-09T01:52:03,211 INFO [RS:0;ef6f18c58dc9:38509 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C38509%2C1733709093073:(num 1733709119150) 2024-12-09T01:52:03,211 DEBUG [RS:0;ef6f18c58dc9:38509 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:52:03,211 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:52:03,211 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:52:03,211 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T01:52:03,211 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:52:03,211 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:52:03,211 INFO [RS:0;ef6f18c58dc9:38509 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38509 2024-12-09T01:52:03,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,38509,1733709093073 2024-12-09T01:52:03,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:52:03,216 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:52:03,217 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,38509,1733709093073] 2024-12-09T01:52:03,222 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,38509,1733709093073 already deleted, retry=false 2024-12-09T01:52:03,222 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,38509,1733709093073 expired; onlineServers=0 2024-12-09T01:52:03,222 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef6f18c58dc9,37609,1733709092988' ***** 2024-12-09T01:52:03,222 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T01:52:03,222 INFO [M:0;ef6f18c58dc9:37609 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:52:03,223 INFO [M:0;ef6f18c58dc9:37609 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:52:03,223 DEBUG [M:0;ef6f18c58dc9:37609 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T01:52:03,223 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T01:52:03,223 DEBUG [M:0;ef6f18c58dc9:37609 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T01:52:03,223 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709093371 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709093371,5,FailOnTimeoutGroup] 2024-12-09T01:52:03,223 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709093372 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709093372,5,FailOnTimeoutGroup] 2024-12-09T01:52:03,223 INFO [M:0;ef6f18c58dc9:37609 {}] hbase.ChoreService(370): Chore service for: master/ef6f18c58dc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T01:52:03,223 INFO [M:0;ef6f18c58dc9:37609 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:52:03,223 DEBUG [M:0;ef6f18c58dc9:37609 {}] master.HMaster(1795): Stopping service threads 2024-12-09T01:52:03,223 INFO [M:0;ef6f18c58dc9:37609 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T01:52:03,224 INFO [M:0;ef6f18c58dc9:37609 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:52:03,224 INFO [M:0;ef6f18c58dc9:37609 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T01:52:03,224 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T01:52:03,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T01:52:03,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:03,225 DEBUG [M:0;ef6f18c58dc9:37609 {}] zookeeper.ZKUtil(347): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T01:52:03,225 WARN [M:0;ef6f18c58dc9:37609 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T01:52:03,225 INFO [M:0;ef6f18c58dc9:37609 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/.lastflushedseqids 2024-12-09T01:52:03,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741846_1030 (size=130) 2024-12-09T01:52:03,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741846_1030 (size=130) 2024-12-09T01:52:03,231 INFO [M:0;ef6f18c58dc9:37609 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T01:52:03,232 INFO [M:0;ef6f18c58dc9:37609 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T01:52:03,232 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:52:03,232 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:52:03,232 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:52:03,232 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:52:03,232 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:52:03,232 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-12-09T01:52:03,232 ERROR [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData-prefix:ef6f18c58dc9,37609,1733709092988 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:52:03,232 WARN [FSHLog-0-hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData-prefix:ef6f18c58dc9,37609,1733709092988 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-12-09T01:52:03,233 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog ef6f18c58dc9%2C37609%2C1733709092988:(num 1733709093189) roll requested 2024-12-09T01:52:03,233 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C37609%2C1733709092988.1733709123233 2024-12-09T01:52:03,240 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,240 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,240 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,240 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,240 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,241 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709123233 2024-12-09T01:52:03,241 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:52:03,241 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:43857,DS-cedbe907-b73d-43f0-88eb-987ea205b350,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-12-09T01:52:03,241 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 2024-12-09T01:52:03,241 WARN [IPC Server handler 2 on default port 43413 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-12-09T01:52:03,242 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 after 1ms 2024-12-09T01:52:03,243 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39963:39963),(127.0.0.1/127.0.0.1:33745:33745)] 2024-12-09T01:52:03,243 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 is not closed yet, will try archiving it next time 2024-12-09T01:52:03,258 DEBUG [M:0;ef6f18c58dc9:37609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eebe603a1534480cb0c4d7498b49d50a is 82, key is hbase:meta,,1/info:regioninfo/1733709094075/Put/seqid=0 2024-12-09T01:52:03,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741848_1033 (size=5672) 2024-12-09T01:52:03,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741848_1033 (size=5672) 2024-12-09T01:52:03,264 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eebe603a1534480cb0c4d7498b49d50a 2024-12-09T01:52:03,284 DEBUG [M:0;ef6f18c58dc9:37609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8f2deb7794f9474c8406e0edc2add4f2 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733709094613/Put/seqid=0 2024-12-09T01:52:03,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741849_1034 (size=6118) 2024-12-09T01:52:03,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741849_1034 (size=6118) 2024-12-09T01:52:03,291 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8f2deb7794f9474c8406e0edc2add4f2 2024-12-09T01:52:03,310 DEBUG [M:0;ef6f18c58dc9:37609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ebb684b656794c8286838ca110ee22d9 is 69, key is ef6f18c58dc9,38509,1733709093073/rs:state/1733709093436/Put/seqid=0 2024-12-09T01:52:03,314 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741850_1035 (size=5156) 2024-12-09T01:52:03,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741850_1035 (size=5156) 2024-12-09T01:52:03,315 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ebb684b656794c8286838ca110ee22d9 2024-12-09T01:52:03,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:52:03,321 INFO [RS:0;ef6f18c58dc9:38509 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:52:03,321 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38509-0x1007479114a0001, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:52:03,321 INFO [RS:0;ef6f18c58dc9:38509 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,38509,1733709093073; zookeeper connection closed. 2024-12-09T01:52:03,322 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4fbcc1c2 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4fbcc1c2 2024-12-09T01:52:03,322 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T01:52:03,334 DEBUG [M:0;ef6f18c58dc9:37609 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c2fd63308fa4366952b23b84112e993 is 52, key is load_balancer_on/state:d/1733709094214/Put/seqid=0 2024-12-09T01:52:03,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741851_1036 (size=5056) 2024-12-09T01:52:03,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741851_1036 (size=5056) 2024-12-09T01:52:03,339 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c2fd63308fa4366952b23b84112e993 2024-12-09T01:52:03,344 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/eebe603a1534480cb0c4d7498b49d50a as hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/eebe603a1534480cb0c4d7498b49d50a 2024-12-09T01:52:03,349 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/eebe603a1534480cb0c4d7498b49d50a, entries=8, sequenceid=56, filesize=5.5 K 2024-12-09T01:52:03,350 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8f2deb7794f9474c8406e0edc2add4f2 as hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8f2deb7794f9474c8406e0edc2add4f2 2024-12-09T01:52:03,356 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8f2deb7794f9474c8406e0edc2add4f2, entries=6, sequenceid=56, filesize=6.0 K 2024-12-09T01:52:03,358 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ebb684b656794c8286838ca110ee22d9 as hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ebb684b656794c8286838ca110ee22d9 2024-12-09T01:52:03,364 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ebb684b656794c8286838ca110ee22d9, entries=1, sequenceid=56, filesize=5.0 K 2024-12-09T01:52:03,365 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c2fd63308fa4366952b23b84112e993 as hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c2fd63308fa4366952b23b84112e993 2024-12-09T01:52:03,371 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c2fd63308fa4366952b23b84112e993, entries=1, sequenceid=56, filesize=4.9 K 2024-12-09T01:52:03,372 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=56, compaction requested=false 2024-12-09T01:52:03,374 INFO [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T01:52:03,374 DEBUG [M:0;ef6f18c58dc9:37609 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709123232Disabling compacts and flushes for region at 1733709123232Disabling writes for close at 1733709123232Obtaining lock to block concurrent updates at 1733709123232Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733709123232Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1733709123232Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733709123243 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733709123243Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733709123258 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733709123258Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733709123269 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733709123283 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733709123283Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733709123296 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733709123309 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733709123309Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733709123320 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733709123333 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733709123333Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26663e97: reopening flushed file at 1733709123344 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65cec6bb: reopening flushed file at 1733709123350 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a0dc7f5: reopening flushed file at 1733709123357 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24cef528: reopening flushed file at 1733709123364 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 140ms, sequenceid=56, compaction requested=false at 1733709123372 (+8 ms)Writing region close event to WAL at 1733709123374 (+2 ms)Closed at 1733709123374 2024-12-09T01:52:03,374 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,375 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,375 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,375 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,375 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:03,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35007 is added to blk_1073741847_1031 (size=757) 2024-12-09T01:52:03,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46857 is added to blk_1073741847_1031 (size=757) 2024-12-09T01:52:03,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:04,144 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:04,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:04,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,336 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,339 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,341 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,344 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,847 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:52:04,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,848 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,863 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,864 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,867 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,868 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,870 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:04,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:05,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:05,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:05,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:06,145 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:06,200 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-12-09T01:52:06,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:06,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:07,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:07,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:07,242 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 after 4001ms 2024-12-09T01:52:07,243 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/WALs/ef6f18c58dc9,37609,1733709092988/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 to hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/oldWALs/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 2024-12-09T01:52:07,246 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/MasterData/oldWALs/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189 to hdfs://localhost:43413/user/jenkins/test-data/e531e3b2-6066-8d59-31ed-7ed93eeb237c/oldWALs/ef6f18c58dc9%2C37609%2C1733709092988.1733709093189$masterlocalwal$ 2024-12-09T01:52:07,246 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T01:52:07,246 INFO [M:0;ef6f18c58dc9:37609 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T01:52:07,246 INFO [M:0;ef6f18c58dc9:37609 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37609 2024-12-09T01:52:07,247 INFO [M:0;ef6f18c58dc9:37609 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:52:07,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:52:07,349 INFO [M:0;ef6f18c58dc9:37609 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:52:07,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37609-0x1007479114a0000, quorum=127.0.0.1:63721, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:52:07,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7ff0f915{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:52:07,351 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58a9274b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:52:07,351 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:52:07,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17312068{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:52:07,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ede944f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,STOPPED} 2024-12-09T01:52:07,353 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:52:07,353 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:52:07,353 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:52:07,353 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2069266944-172.17.0.2-1733709091959 (Datanode Uuid 0e7f2e3c-18d3-4a81-975d-83e740a7cd7b) service to localhost/127.0.0.1:43413 2024-12-09T01:52:07,354 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data3/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:52:07,354 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data4/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:52:07,354 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:52:07,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@45604664{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:52:07,357 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6e793ffb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:52:07,357 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:52:07,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@571de0fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:52:07,357 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75f58649{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,STOPPED} 2024-12-09T01:52:07,358 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:52:07,359 WARN [BP-2069266944-172.17.0.2-1733709091959 heartbeating to localhost/127.0.0.1:43413 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2069266944-172.17.0.2-1733709091959 (Datanode Uuid c141504a-e754-4899-983b-758e64bcb62c) service to localhost/127.0.0.1:43413 2024-12-09T01:52:07,359 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:52:07,359 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:52:07,360 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data1/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:52:07,360 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/cluster_0984c77f-9f8c-0682-ea32-ce293d1844cc/data/data2/current/BP-2069266944-172.17.0.2-1733709091959 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:52:07,360 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:52:07,366 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f6d993c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:52:07,366 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ea918cd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:52:07,366 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:52:07,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10e56c5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:52:07,367 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c727387{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir/,STOPPED} 2024-12-09T01:52:07,373 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T01:52:07,390 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T01:52:07,399 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=181 (was 156) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43413 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:43413 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43413 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43413 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:43413 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43413 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43413 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43413 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 438) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=220 (was 246), ProcessCount=11 (was 11), AvailableMemoryMB=11696 (was 12446) 2024-12-09T01:52:07,407 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=181, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=220, ProcessCount=11, AvailableMemoryMB=11695 2024-12-09T01:52:07,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T01:52:07,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.log.dir so I do NOT create it in target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a 2024-12-09T01:52:07,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d74f9a14-8ebf-fe76-6de8-e8447a2aadf3/hadoop.tmp.dir so I do NOT create it in target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a 2024-12-09T01:52:07,408 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1, deleteOnExit=true 2024-12-09T01:52:07,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T01:52:07,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/test.cache.data in system properties and HBase conf 2024-12-09T01:52:07,408 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T01:52:07,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.log.dir in system properties and HBase conf 2024-12-09T01:52:07,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T01:52:07,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T01:52:07,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T01:52:07,409 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-09T01:52:07,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:52:07,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:52:07,409 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/nfs.dump.dir in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/java.io.tmpdir in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T01:52:07,410 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T01:52:07,423 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:52:07,484 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:52:07,488 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:52:07,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:52:07,496 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:52:07,496 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:52:07,498 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:52:07,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70e475c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:52:07,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@290f188f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:52:07,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62558ec9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/java.io.tmpdir/jetty-localhost-33547-hadoop-hdfs-3_4_1-tests_jar-_-any-9233726303792632393/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:52:07,623 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@294894f5{HTTP/1.1, (http/1.1)}{localhost:33547} 2024-12-09T01:52:07,623 INFO [Time-limited test {}] server.Server(415): Started @190360ms 2024-12-09T01:52:07,636 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:52:07,701 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:52:07,705 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:52:07,706 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:52:07,706 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:52:07,706 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:52:07,706 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e6ee234{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:52:07,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b5b6aa1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:52:07,821 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@48bfafbe{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/java.io.tmpdir/jetty-localhost-38761-hadoop-hdfs-3_4_1-tests_jar-_-any-12791097254570400205/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:52:07,821 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b93cee9{HTTP/1.1, (http/1.1)}{localhost:38761} 2024-12-09T01:52:07,821 INFO [Time-limited test {}] server.Server(415): Started @190558ms 2024-12-09T01:52:07,822 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:52:07,860 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:52:07,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:52:07,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:52:07,864 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:52:07,864 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:52:07,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20f59884{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:52:07,865 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@190ad9e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:52:07,915 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/data/data1/current/BP-836402380-172.17.0.2-1733709127439/current, will proceed with Du for space computation calculation, 2024-12-09T01:52:07,915 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/data/data2/current/BP-836402380-172.17.0.2-1733709127439/current, will proceed with Du for space computation calculation, 2024-12-09T01:52:07,937 WARN [Thread-1633 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:52:07,940 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xefa6e11e7e653636 with lease ID 0x691232de426446c0: Processing first storage report for DS-b3b4d81a-e250-4a54-a392-dee52615fc24 from datanode DatanodeRegistration(127.0.0.1:38615, datanodeUuid=1b05c82e-8175-498d-8ea0-b0f1901cc57e, infoPort=35567, infoSecurePort=0, ipcPort=39815, storageInfo=lv=-57;cid=testClusterID;nsid=123760093;c=1733709127439) 2024-12-09T01:52:07,940 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xefa6e11e7e653636 with lease ID 0x691232de426446c0: from storage DS-b3b4d81a-e250-4a54-a392-dee52615fc24 node DatanodeRegistration(127.0.0.1:38615, datanodeUuid=1b05c82e-8175-498d-8ea0-b0f1901cc57e, infoPort=35567, infoSecurePort=0, ipcPort=39815, storageInfo=lv=-57;cid=testClusterID;nsid=123760093;c=1733709127439), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:52:07,940 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xefa6e11e7e653636 with lease ID 0x691232de426446c0: Processing first storage report for DS-ac79fd87-4929-4de5-9f5e-f4b9265cff38 from datanode DatanodeRegistration(127.0.0.1:38615, datanodeUuid=1b05c82e-8175-498d-8ea0-b0f1901cc57e, infoPort=35567, infoSecurePort=0, ipcPort=39815, storageInfo=lv=-57;cid=testClusterID;nsid=123760093;c=1733709127439) 2024-12-09T01:52:07,940 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xefa6e11e7e653636 with lease ID 0x691232de426446c0: from storage DS-ac79fd87-4929-4de5-9f5e-f4b9265cff38 node DatanodeRegistration(127.0.0.1:38615, datanodeUuid=1b05c82e-8175-498d-8ea0-b0f1901cc57e, infoPort=35567, infoSecurePort=0, ipcPort=39815, storageInfo=lv=-57;cid=testClusterID;nsid=123760093;c=1733709127439), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:52:07,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:08,000 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@fc981fd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/java.io.tmpdir/jetty-localhost-43687-hadoop-hdfs-3_4_1-tests_jar-_-any-6579764221685917502/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:52:08,000 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1367dc96{HTTP/1.1, (http/1.1)}{localhost:43687} 2024-12-09T01:52:08,000 INFO [Time-limited test {}] server.Server(415): Started @190738ms 2024-12-09T01:52:08,001 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:52:08,082 WARN [Thread-1680 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/data/data3/current/BP-836402380-172.17.0.2-1733709127439/current, will proceed with Du for space computation calculation, 2024-12-09T01:52:08,082 WARN [Thread-1681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/data/data4/current/BP-836402380-172.17.0.2-1733709127439/current, will proceed with Du for space computation calculation, 2024-12-09T01:52:08,100 WARN [Thread-1669 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:52:08,103 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2834d1bd0e45b14c with lease ID 0x691232de426446c1: Processing first storage report for DS-47e05dfc-c280-4016-b060-ec5a737d5c51 from datanode DatanodeRegistration(127.0.0.1:38611, datanodeUuid=cdf93d58-db8a-458f-9997-47e50d19a7ce, infoPort=42323, infoSecurePort=0, ipcPort=39187, storageInfo=lv=-57;cid=testClusterID;nsid=123760093;c=1733709127439) 2024-12-09T01:52:08,103 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2834d1bd0e45b14c with lease ID 0x691232de426446c1: from storage DS-47e05dfc-c280-4016-b060-ec5a737d5c51 node DatanodeRegistration(127.0.0.1:38611, datanodeUuid=cdf93d58-db8a-458f-9997-47e50d19a7ce, infoPort=42323, infoSecurePort=0, ipcPort=39187, storageInfo=lv=-57;cid=testClusterID;nsid=123760093;c=1733709127439), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:52:08,103 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2834d1bd0e45b14c with lease ID 0x691232de426446c1: Processing first storage report for DS-782bdc73-50b2-4b47-a90c-9e81574bf6a9 from datanode DatanodeRegistration(127.0.0.1:38611, datanodeUuid=cdf93d58-db8a-458f-9997-47e50d19a7ce, infoPort=42323, infoSecurePort=0, ipcPort=39187, storageInfo=lv=-57;cid=testClusterID;nsid=123760093;c=1733709127439) 2024-12-09T01:52:08,103 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2834d1bd0e45b14c with lease ID 0x691232de426446c1: from storage DS-782bdc73-50b2-4b47-a90c-9e81574bf6a9 node DatanodeRegistration(127.0.0.1:38611, datanodeUuid=cdf93d58-db8a-458f-9997-47e50d19a7ce, infoPort=42323, infoSecurePort=0, ipcPort=39187, storageInfo=lv=-57;cid=testClusterID;nsid=123760093;c=1733709127439), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:52:08,123 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a 2024-12-09T01:52:08,126 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/zookeeper_0, clientPort=60018, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T01:52:08,126 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60018 2024-12-09T01:52:08,127 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:52:08,128 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:52:08,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:52:08,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:52:08,137 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2 with version=8 2024-12-09T01:52:08,137 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/hbase-staging 2024-12-09T01:52:08,139 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:52:08,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:52:08,139 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:52:08,140 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:52:08,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:52:08,140 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:52:08,140 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T01:52:08,140 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:52:08,141 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34877 2024-12-09T01:52:08,142 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34877 connecting to ZooKeeper ensemble=127.0.0.1:60018 2024-12-09T01:52:08,146 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:08,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348770x0, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:52:08,148 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34877-0x10074799aa40000 connected 2024-12-09T01:52:08,160 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:52:08,162 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:52:08,163 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:52:08,164 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2, hbase.cluster.distributed=false 2024-12-09T01:52:08,165 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:52:08,165 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34877 2024-12-09T01:52:08,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34877 2024-12-09T01:52:08,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 
with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34877 2024-12-09T01:52:08,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34877 2024-12-09T01:52:08,166 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34877 2024-12-09T01:52:08,181 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:52:08,182 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:52:08,182 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:52:08,182 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:52:08,182 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:52:08,182 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:52:08,182 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:52:08,182 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:52:08,183 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36587 2024-12-09T01:52:08,184 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36587 connecting to ZooKeeper ensemble=127.0.0.1:60018 2024-12-09T01:52:08,184 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:52:08,186 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:52:08,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:365870x0, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:52:08,190 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:52:08,190 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36587-0x10074799aa40001 connected 2024-12-09T01:52:08,190 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:52:08,191 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache 
enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:52:08,191 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:52:08,192 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:52:08,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36587 2024-12-09T01:52:08,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36587 2024-12-09T01:52:08,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36587 2024-12-09T01:52:08,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36587 2024-12-09T01:52:08,193 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36587 2024-12-09T01:52:08,205 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef6f18c58dc9:34877 2024-12-09T01:52:08,205 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef6f18c58dc9,34877,1733709128139 2024-12-09T01:52:08,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:52:08,207 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:52:08,207 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef6f18c58dc9,34877,1733709128139 2024-12-09T01:52:08,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:08,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:52:08,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,209 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:52:08,210 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef6f18c58dc9,34877,1733709128139 from backup master directory 2024-12-09T01:52:08,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef6f18c58dc9,34877,1733709128139 2024-12-09T01:52:08,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:52:08,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:52:08,212 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:52:08,212 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef6f18c58dc9,34877,1733709128139 2024-12-09T01:52:08,216 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/hbase.id] with ID: 0bc9af13-e812-49e1-8c4b-5596e2486b55 2024-12-09T01:52:08,216 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/.tmp/hbase.id 2024-12-09T01:52:08,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:52:08,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:52:08,222 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/.tmp/hbase.id]:[hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/hbase.id] 2024-12-09T01:52:08,232 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:52:08,232 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T01:52:08,233 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T01:52:08,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:52:08,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:52:08,241 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:52:08,242 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T01:52:08,242 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:52:08,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:52:08,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:52:08,249 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store 2024-12-09T01:52:08,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:52:08,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:52:08,255 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:52:08,255 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:52:08,255 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:52:08,255 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:52:08,255 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:52:08,255 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:52:08,255 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T01:52:08,255 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709128255Disabling compacts and flushes for region at 1733709128255Disabling writes for close at 1733709128255Writing region close event to WAL at 1733709128255Closed at 1733709128255 2024-12-09T01:52:08,256 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/.initializing 2024-12-09T01:52:08,256 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/WALs/ef6f18c58dc9,34877,1733709128139 2024-12-09T01:52:08,258 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C34877%2C1733709128139, suffix=, logDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/WALs/ef6f18c58dc9,34877,1733709128139, archiveDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/oldWALs, maxLogs=10 2024-12-09T01:52:08,259 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C34877%2C1733709128139.1733709128259 2024-12-09T01:52:08,263 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/WALs/ef6f18c58dc9,34877,1733709128139/ef6f18c58dc9%2C34877%2C1733709128139.1733709128259 2024-12-09T01:52:08,267 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35567:35567),(127.0.0.1/127.0.0.1:42323:42323)] 2024-12-09T01:52:08,268 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:52:08,268 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:52:08,268 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,268 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,269 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,270 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T01:52:08,270 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,271 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:52:08,271 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T01:52:08,272 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:52:08,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,273 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T01:52:08,273 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:52:08,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T01:52:08,275 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:52:08,275 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,276 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,276 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,278 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,278 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,279 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T01:52:08,280 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:52:08,282 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:52:08,282 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=690709, jitterRate=-0.12171900272369385}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:52:08,283 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733709128268Initializing all the Stores at 1733709128269 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709128269Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709128269Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709128269Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709128269Cleaning up temporary data from old regions at 1733709128278 (+9 ms)Region opened successfully at 1733709128283 (+5 ms) 2024-12-09T01:52:08,284 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T01:52:08,287 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f974ed0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:52:08,288 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T01:52:08,288 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T01:52:08,288 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T01:52:08,288 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T01:52:08,289 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T01:52:08,289 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T01:52:08,289 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T01:52:08,291 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T01:52:08,292 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T01:52:08,294 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T01:52:08,294 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T01:52:08,294 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T01:52:08,296 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T01:52:08,296 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T01:52:08,297 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T01:52:08,298 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T01:52:08,299 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T01:52:08,300 DEBUG 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T01:52:08,302 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T01:52:08,303 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T01:52:08,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:52:08,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:52:08,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,305 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,305 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef6f18c58dc9,34877,1733709128139, sessionid=0x10074799aa40000, setting cluster-up flag (Was=false) 2024-12-09T01:52:08,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,312 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T01:52:08,313 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,34877,1733709128139 2024-12-09T01:52:08,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,321 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T01:52:08,322 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,34877,1733709128139 2024-12-09T01:52:08,323 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T01:52:08,325 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T01:52:08,325 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T01:52:08,325 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T01:52:08,325 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef6f18c58dc9,34877,1733709128139 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T01:52:08,326 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:52:08,326 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:52:08,326 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:52:08,326 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:52:08,326 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef6f18c58dc9:0, corePoolSize=10, maxPoolSize=10 2024-12-09T01:52:08,327 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,327 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:52:08,327 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T01:52:08,327 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733709158327 2024-12-09T01:52:08,327 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T01:52:08,328 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T01:52:08,328 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T01:52:08,328 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T01:52:08,328 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T01:52:08,328 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T01:52:08,328 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,328 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:52:08,328 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T01:52:08,328 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T01:52:08,328 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T01:52:08,328 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T01:52:08,329 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T01:52:08,329 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T01:52:08,329 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709128329,5,FailOnTimeoutGroup] 2024-12-09T01:52:08,329 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709128329,5,FailOnTimeoutGroup] 2024-12-09T01:52:08,329 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,329 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T01:52:08,329 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,329 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,329 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,329 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T01:52:08,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:52:08,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:52:08,336 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T01:52:08,336 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2 2024-12-09T01:52:08,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:52:08,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:52:08,343 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:52:08,344 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:52:08,345 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:52:08,345 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:52:08,345 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:52:08,346 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:52:08,346 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:52:08,347 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:52:08,348 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:52:08,348 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,348 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:52:08,348 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:52:08,349 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:52:08,349 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,349 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:52:08,349 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:52:08,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740 2024-12-09T01:52:08,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740 2024-12-09T01:52:08,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:52:08,351 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:52:08,352 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
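The FlushLargeStoresPolicy entries above and earlier in this run (32.0 M for the master:store region, 16.0 M here for hbase:meta) state their own fallback rule: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the per-family flush lower bound becomes the region's memstore flush size divided by its number of column families. A minimal sketch of that arithmetic; the class and method names are invented, and the 64 MB meta flush size is inferred from the logged 16 MB result for four families rather than read from any configuration:

    // Sketch of the fallback described in the FlushLargeStoresPolicy log lines.
    // Names are illustrative, not HBase's.
    public final class FlushLowerBoundSketch {

        /**
         * If the table descriptor does not set the per-column-family lower bound,
         * fall back to memstoreFlushSize / numberOfFamilies, as the log message states.
         */
        static long flushSizeLowerBound(Long configuredLowerBound,
                                        long memstoreFlushSize,
                                        int numberOfFamilies) {
            if (configuredLowerBound != null) {
                return configuredLowerBound;
            }
            return memstoreFlushSize / numberOfFamilies;
        }

        public static void main(String[] args) {
            // master:store has 4 families (info, proc, rs, state): 128 MB / 4 = 32 MB.
            System.out.println(flushSizeLowerBound(null, 128L << 20, 4) >> 20); // 32
            // hbase:meta also has 4 families (info, ns, rep_barrier, table); the logged
            // 16 MB result implies a 64 MB flush size for it in this test run.
            System.out.println(flushSizeLowerBound(null, 64L << 20, 4) >> 20);  // 16
        }
    }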
2024-12-09T01:52:08,353 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:52:08,354 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:52:08,355 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877812, jitterRate=0.1161966472864151}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:52:08,355 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733709128343Initializing all the Stores at 1733709128343Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709128343Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709128343Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709128343Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709128343Cleaning up temporary data from old regions at 1733709128351 (+8 ms)Region opened successfully at 1733709128355 (+4 ms) 2024-12-09T01:52:08,355 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:52:08,355 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:52:08,355 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:52:08,355 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:52:08,355 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:52:08,356 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:52:08,356 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709128355Disabling compacts and flushes for region at 1733709128355Disabling writes for close at 1733709128355Writing region close event to 
WAL at 1733709128356 (+1 ms)Closed at 1733709128356 2024-12-09T01:52:08,357 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:52:08,357 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T01:52:08,357 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T01:52:08,358 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:52:08,359 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T01:52:08,395 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(746): ClusterId : 0bc9af13-e812-49e1-8c4b-5596e2486b55 2024-12-09T01:52:08,395 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:52:08,397 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:52:08,397 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:52:08,399 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:52:08,400 DEBUG [RS:0;ef6f18c58dc9:36587 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56f911a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:52:08,412 DEBUG [RS:0;ef6f18c58dc9:36587 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef6f18c58dc9:36587 2024-12-09T01:52:08,412 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:52:08,412 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:52:08,412 DEBUG [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(832): About to register with Master. 
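The "Opened …; next sequenceid=2; SteppingSplitPolicy…" entries just above report a desiredMaxFileSize together with a jitterRate (690709 at -0.1217 for master:store, 877812 at +0.1162 for hbase:meta). Those numbers are consistent with a constant-size split policy that perturbs the configured maximum file size by a random jitter, i.e. desired = configured + configured * jitterRate, with jitterRate drawn from roughly ±(jitter/2). The sketch below assumes that form and a configured size of about 786432 bytes for this test, both inferred from the logged values; it is an illustration, not the ConstantSizeRegionSplitPolicy source:

    import java.util.concurrent.ThreadLocalRandom;

    // Illustrative sketch of a jittered split threshold; not HBase's actual code.
    final class JitteredSplitSizeSketch {

        /**
         * @param configuredMaxFileSize configured max store file size, in bytes
         * @param jitterFraction        jitter fraction (0.25 assumed here)
         */
        static long desiredMaxFileSize(long configuredMaxFileSize, double jitterFraction) {
            // jitterRate uniform in [-jitterFraction/2, +jitterFraction/2],
            // matching the magnitudes seen in the log (|jitterRate| < 0.125).
            double jitterRate = (ThreadLocalRandom.current().nextDouble() - 0.5) * jitterFraction;
            return configuredMaxFileSize + (long) (configuredMaxFileSize * jitterRate);
        }

        public static void main(String[] args) {
            // With a base of ~786432 bytes, jitterRate = -0.1217 gives ~690709 and
            // +0.1162 gives ~877812, the two values logged above.
            System.out.println(desiredMaxFileSize(786432L, 0.25));
        }
    }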
2024-12-09T01:52:08,412 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,34877,1733709128139 with port=36587, startcode=1733709128181 2024-12-09T01:52:08,413 DEBUG [RS:0;ef6f18c58dc9:36587 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:52:08,414 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34849, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:52:08,415 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34877 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:08,415 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34877 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:08,417 DEBUG [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2 2024-12-09T01:52:08,417 DEBUG [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35509 2024-12-09T01:52:08,417 DEBUG [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:52:08,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:52:08,419 DEBUG [RS:0;ef6f18c58dc9:36587 {}] zookeeper.ZKUtil(111): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:08,419 WARN [RS:0;ef6f18c58dc9:36587 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:52:08,419 INFO [RS:0;ef6f18c58dc9:36587 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:52:08,419 DEBUG [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:08,419 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,36587,1733709128181] 2024-12-09T01:52:08,424 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:52:08,426 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:52:08,426 INFO [RS:0;ef6f18c58dc9:36587 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:52:08,426 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
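The registration sequence above (reportForDuty, ServerManager registering ef6f18c58dc9,36587,1733709128181, and RegionServerTracker reacting to a newly created ephemeral node under /hbase/rs) hinges on ZooKeeper ephemeral znodes: the entry exists only while the region server's session is alive, so a crashed server disappears from /hbase/rs automatically. A minimal sketch of that pattern with the plain ZooKeeper client; the quorum address matches the one logged, but the session timeout and empty payload are placeholder choices:

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch: register a server under /hbase/rs as an ephemeral znode so the entry
    // disappears automatically when the server's ZooKeeper session ends.
    public class EphemeralRegistrationSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:60018", 30_000, event -> { });
            // A real client would wait for the SyncConnected event before creating.
            String serverName = "ef6f18c58dc9,36587,1733709128181"; // host,port,startcode
            zk.create("/hbase/rs/" + serverName,
                      new byte[0],                  // presence only, no payload needed
                      ZooDefs.Ids.OPEN_ACL_UNSAFE,  // test-style open ACL
                      CreateMode.EPHEMERAL);        // removed when the session dies
            // The master side watches the children of /hbase/rs and treats a removed
            // child as a possible server crash.
            zk.close();
        }
    }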
2024-12-09T01:52:08,427 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:52:08,428 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:52:08,428 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:52:08,428 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:52:08,429 DEBUG [RS:0;ef6f18c58dc9:36587 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:52:08,429 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
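Each "Starting executor service name=RS_…, corePoolSize=N, maxPoolSize=N" line above corresponds to a small, named thread pool dedicated to a single event type (open region, open meta, close region, WAL replay, and so on). A sketch of that general pattern with java.util.concurrent, using a pool name and size taken from the log; it illustrates the idea rather than reproducing HBase's ExecutorService wrapper:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    // Sketch: a named, fixed-size executor in the spirit of the
    // "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." lines.
    public class NamedExecutorSketch {
        static ThreadPoolExecutor newPool(String name, int poolSize) {
            AtomicInteger counter = new AtomicInteger();
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    poolSize, poolSize,             // corePoolSize == maxPoolSize
                    60, TimeUnit.SECONDS,           // let idle threads exit
                    new LinkedBlockingQueue<>(),    // unbounded event queue
                    r -> new Thread(r, name + "-" + counter.incrementAndGet()));
            pool.allowCoreThreadTimeOut(true);
            return pool;
        }

        public static void main(String[] args) {
            ThreadPoolExecutor openRegion = newPool("RS_OPEN_REGION", 1);
            openRegion.execute(() -> System.out.println("would open a region here"));
            openRegion.shutdown();
        }
    }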
2024-12-09T01:52:08,429 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,429 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,429 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,429 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,429 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36587,1733709128181-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:52:08,452 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:52:08,453 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,36587,1733709128181-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,453 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,453 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.Replication(171): ef6f18c58dc9,36587,1733709128181 started 2024-12-09T01:52:08,471 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,471 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,36587,1733709128181, RpcServer on ef6f18c58dc9/172.17.0.2:36587, sessionid=0x10074799aa40001 2024-12-09T01:52:08,471 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:52:08,472 DEBUG [RS:0;ef6f18c58dc9:36587 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:08,472 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,36587,1733709128181' 2024-12-09T01:52:08,472 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:52:08,472 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:52:08,473 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:52:08,473 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:52:08,473 DEBUG [RS:0;ef6f18c58dc9:36587 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:08,473 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,36587,1733709128181' 2024-12-09T01:52:08,473 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:52:08,473 DEBUG 
[RS:0;ef6f18c58dc9:36587 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:52:08,474 DEBUG [RS:0;ef6f18c58dc9:36587 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:52:08,474 INFO [RS:0;ef6f18c58dc9:36587 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:52:08,474 INFO [RS:0;ef6f18c58dc9:36587 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T01:52:08,510 WARN [ef6f18c58dc9:34877 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T01:52:08,576 INFO [RS:0;ef6f18c58dc9:36587 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C36587%2C1733709128181, suffix=, logDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181, archiveDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/oldWALs, maxLogs=32 2024-12-09T01:52:08,576 INFO [RS:0;ef6f18c58dc9:36587 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C36587%2C1733709128181.1733709128576 2024-12-09T01:52:08,585 INFO [RS:0;ef6f18c58dc9:36587 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709128576 2024-12-09T01:52:08,586 DEBUG [RS:0;ef6f18c58dc9:36587 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42323:42323),(127.0.0.1/127.0.0.1:35567:35567)] 2024-12-09T01:52:08,760 DEBUG [ef6f18c58dc9:34877 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T01:52:08,761 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:08,762 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,36587,1733709128181, state=OPENING 2024-12-09T01:52:08,764 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T01:52:08,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,765 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:08,766 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:52:08,766 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:52:08,766 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:52:08,766 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,36587,1733709128181}] 2024-12-09T01:52:08,919 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:52:08,921 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38877, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:52:08,925 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T01:52:08,925 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:52:08,927 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C36587%2C1733709128181.meta, suffix=.meta, logDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181, archiveDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/oldWALs, maxLogs=32 2024-12-09T01:52:08,928 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C36587%2C1733709128181.meta.1733709128928.meta 2024-12-09T01:52:08,932 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.meta.1733709128928.meta 2024-12-09T01:52:08,933 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35567:35567),(127.0.0.1/127.0.0.1:42323:42323)] 2024-12-09T01:52:08,934 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:52:08,934 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T01:52:08,934 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T01:52:08,934 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
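The "WAL configuration: blocksize=256 MB, rollsize=128 MB, …, maxLogs=32" entries above pair an HDFS block size with a roll threshold of half that value, matching the usual blocksize × roll-multiplier arrangement (multiplier 0.5): once the active WAL file passes the roll size, a new timestamped file such as …1733709128576 is opened, and maxLogs caps how many un-archived WAL files may accumulate before flushes are forced. A rough sketch of the size check only, with all names invented:

    // Sketch of the size-based roll decision implied by the logged
    // "blocksize=256 MB, rollsize=128 MB" pair: rollsize = blocksize * multiplier,
    // and the writer switches to a new WAL file once that is exceeded.
    final class WalRollSketch {
        static long rollSize(long blockSizeBytes, double rollMultiplier) {
            return (long) (blockSizeBytes * rollMultiplier);
        }

        static boolean shouldRoll(long currentWalBytes, long rollSizeBytes) {
            return currentWalBytes >= rollSizeBytes;
        }

        public static void main(String[] args) {
            long roll = rollSize(256L << 20, 0.5);             // 128 MB, matching the log
            System.out.println(shouldRoll(130L << 20, roll));  // true: time to roll
        }
    }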
2024-12-09T01:52:08,934 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T01:52:08,934 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:52:08,935 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T01:52:08,935 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T01:52:08,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:52:08,937 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:52:08,937 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,937 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:52:08,937 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:52:08,938 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:52:08,938 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:52:08,938 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:52:08,939 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:52:08,939 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,939 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:52:08,940 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:52:08,940 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:52:08,940 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:08,941 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
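The CompactionConfiguration lines repeated above for each column family carry the knobs of ratio-based minor-compaction selection: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2 during peak hours and 5.0 off-peak. Loosely, a store file stays in a candidate selection only while it is not too large relative to the files it would be compacted with. The sketch below shows that ratio test in isolation; it is a simplification of, not a copy of, the ExploringCompactionPolicy named in the log:

    import java.util.List;

    // Loose sketch of the test behind "ratio 1.200000; off-peak ratio 5.000000":
    // a candidate file stays in the selection only if its size is at most
    // ratio * (sum of the sizes of the other candidates). Not the real policy code.
    final class CompactionRatioSketch {
        static boolean fitsRatio(long fileSize, List<Long> otherSizes, double ratio) {
            long sumOfOthers = otherSizes.stream().mapToLong(Long::longValue).sum();
            return fileSize <= ratio * sumOfOthers;
        }

        public static void main(String[] args) {
            // A 100 MB file next to two 40 MB peers: 100 > 1.2 * 80, so it is
            // excluded during peak hours but allowed off-peak (ratio 5.0).
            System.out.println(fitsRatio(100, List.of(40L, 40L), 1.2)); // false
            System.out.println(fitsRatio(100, List.of(40L, 40L), 5.0)); // true
        }
    }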
2024-12-09T01:52:08,941 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:52:08,941 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740 2024-12-09T01:52:08,942 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740 2024-12-09T01:52:08,943 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:52:08,943 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:52:08,943 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T01:52:08,945 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:52:08,946 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=859567, jitterRate=0.09299615025520325}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:52:08,946 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T01:52:08,947 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733709128935Writing region info on filesystem at 1733709128935Initializing all the Stores at 1733709128935Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709128935Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709128936 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709128936Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709128936Cleaning up temporary data from old regions at 1733709128943 (+7 ms)Running coprocessor post-open hooks at 1733709128946 (+3 ms)Region opened successfully at 1733709128947 (+1 ms) 2024-12-09T01:52:08,948 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733709128919 2024-12-09T01:52:08,950 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T01:52:08,950 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T01:52:08,951 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:08,952 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,36587,1733709128181, state=OPEN 2024-12-09T01:52:08,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:52:08,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:52:08,957 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:08,957 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:52:08,957 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:52:08,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T01:52:08,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,36587,1733709128181 in 191 msec 2024-12-09T01:52:08,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T01:52:08,963 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 603 msec 2024-12-09T01:52:08,963 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:52:08,963 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T01:52:08,965 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:52:08,965 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,36587,1733709128181, seqNum=-1] 2024-12-09T01:52:08,965 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:52:08,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:08,967 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59111, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:52:08,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 648 msec 2024-12-09T01:52:08,973 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733709128973, completionTime=-1 2024-12-09T01:52:08,973 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T01:52:08,973 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733709188975 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733709248975 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34877,1733709128139-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34877,1733709128139-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34877,1733709128139-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef6f18c58dc9:34877, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:08,975 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-09T01:52:08,977 DEBUG [master/ef6f18c58dc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T01:52:08,979 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.767sec 2024-12-09T01:52:08,979 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T01:52:08,979 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T01:52:08,979 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T01:52:08,979 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-09T01:52:08,979 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T01:52:08,979 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34877,1733709128139-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:52:08,979 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34877,1733709128139-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T01:52:08,982 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T01:52:08,982 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T01:52:08,982 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34877,1733709128139-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T01:52:08,995 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e2a30ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:52:08,995 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef6f18c58dc9,34877,-1 for getting cluster id 2024-12-09T01:52:08,996 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T01:52:08,999 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0bc9af13-e812-49e1-8c4b-5596e2486b55' 2024-12-09T01:52:08,999 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T01:52:08,999 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0bc9af13-e812-49e1-8c4b-5596e2486b55" 2024-12-09T01:52:09,000 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d3d6907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:52:09,000 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef6f18c58dc9,34877,-1] 2024-12-09T01:52:09,000 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T01:52:09,000 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:52:09,001 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56918, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T01:52:09,002 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30873421, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:52:09,003 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:52:09,004 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,36587,1733709128181, seqNum=-1] 2024-12-09T01:52:09,004 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:52:09,005 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60320, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:52:09,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ef6f18c58dc9,34877,1733709128139 2024-12-09T01:52:09,007 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:52:09,009 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T01:52:09,010 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T01:52:09,011 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is ef6f18c58dc9,34877,1733709128139 2024-12-09T01:52:09,011 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@13e8f9ed 2024-12-09T01:52:09,011 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T01:52:09,012 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56926, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T01:52:09,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T01:52:09,013 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-12-09T01:52:09,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:52:09,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T01:52:09,015 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T01:52:09,015 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:09,015 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-12-09T01:52:09,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:52:09,017 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T01:52:09,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741835_1011 (size=405) 2024-12-09T01:52:09,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741835_1011 (size=405) 2024-12-09T01:52:09,025 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 63be67e37d72759b0b7ada06089b33da, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2 2024-12-09T01:52:09,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741836_1012 (size=88) 2024-12-09T01:52:09,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741836_1012 (size=88) 2024-12-09T01:52:09,032 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:52:09,032 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 63be67e37d72759b0b7ada06089b33da, disabling compactions & flushes 2024-12-09T01:52:09,032 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:09,032 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:09,032 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. after waiting 0 ms 2024-12-09T01:52:09,032 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 
2024-12-09T01:52:09,032 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:09,032 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 63be67e37d72759b0b7ada06089b33da: Waiting for close lock at 1733709129032Disabling compacts and flushes for region at 1733709129032Disabling writes for close at 1733709129032Writing region close event to WAL at 1733709129032Closed at 1733709129032 2024-12-09T01:52:09,034 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T01:52:09,034 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1733709129034"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733709129034"}]},"ts":"1733709129034"} 2024-12-09T01:52:09,037 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T01:52:09,038 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T01:52:09,039 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709129038"}]},"ts":"1733709129038"} 2024-12-09T01:52:09,041 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-12-09T01:52:09,041 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63be67e37d72759b0b7ada06089b33da, ASSIGN}] 2024-12-09T01:52:09,043 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63be67e37d72759b0b7ada06089b33da, ASSIGN 2024-12-09T01:52:09,044 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63be67e37d72759b0b7ada06089b33da, ASSIGN; state=OFFLINE, location=ef6f18c58dc9,36587,1733709128181; forceNewPlan=false, retain=false 2024-12-09T01:52:09,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: 
null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:09,195 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=63be67e37d72759b0b7ada06089b33da, regionState=OPENING, regionLocation=ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:09,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63be67e37d72759b0b7ada06089b33da, ASSIGN because future has completed 2024-12-09T01:52:09,198 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 63be67e37d72759b0b7ada06089b33da, server=ef6f18c58dc9,36587,1733709128181}] 2024-12-09T01:52:09,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:09,355 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 
2024-12-09T01:52:09,355 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 63be67e37d72759b0b7ada06089b33da, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:52:09,355 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,355 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:52:09,355 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,356 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,357 INFO [StoreOpener-63be67e37d72759b0b7ada06089b33da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,358 INFO [StoreOpener-63be67e37d72759b0b7ada06089b33da-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 63be67e37d72759b0b7ada06089b33da columnFamilyName info 2024-12-09T01:52:09,358 DEBUG [StoreOpener-63be67e37d72759b0b7ada06089b33da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:52:09,359 INFO [StoreOpener-63be67e37d72759b0b7ada06089b33da-1 {}] regionserver.HStore(327): Store=63be67e37d72759b0b7ada06089b33da/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:52:09,359 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,360 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,360 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,360 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,360 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,362 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,365 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:52:09,365 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 63be67e37d72759b0b7ada06089b33da; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=762335, jitterRate=-0.030642017722129822}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T01:52:09,365 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:09,366 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 63be67e37d72759b0b7ada06089b33da: Running coprocessor pre-open hook at 1733709129356Writing region info on filesystem at 1733709129356Initializing all the Stores at 1733709129356Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709129356Cleaning up temporary data from old regions at 1733709129360 (+4 ms)Running coprocessor post-open hooks at 1733709129365 (+5 ms)Region opened successfully at 1733709129366 (+1 ms) 2024-12-09T01:52:09,367 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da., pid=6, masterSystemTime=1733709129350 2024-12-09T01:52:09,370 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:09,370 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:09,371 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=63be67e37d72759b0b7ada06089b33da, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:09,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 63be67e37d72759b0b7ada06089b33da, server=ef6f18c58dc9,36587,1733709128181 because future has completed 2024-12-09T01:52:09,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T01:52:09,378 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 63be67e37d72759b0b7ada06089b33da, server=ef6f18c58dc9,36587,1733709128181 in 177 msec 2024-12-09T01:52:09,380 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T01:52:09,380 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=63be67e37d72759b0b7ada06089b33da, ASSIGN in 337 msec 2024-12-09T01:52:09,382 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T01:52:09,382 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709129382"}]},"ts":"1733709129382"} 2024-12-09T01:52:09,385 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-12-09T01:52:09,386 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T01:52:09,388 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 373 msec 2024-12-09T01:52:09,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:10,147 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:10,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:10,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:11,148 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:11,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:11,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:12,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:12,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:12,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:13,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-09T01:52:13,060 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-09T01:52:13,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:52:13,060 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-09T01:52:13,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T01:52:13,060 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T01:52:13,149 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:13,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:13,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:14,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:14,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:14,436 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:52:14,437 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,438 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,439 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,460 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,461 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,462 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,467 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,467 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,468 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,472 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:52:14,478 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T01:52:14,479 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-12-09T01:52:14,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:15,150 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:15,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:15,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:16,151 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:16,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:16,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:17,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:17,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:17,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:18,152 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:18,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:18,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:52:19,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:52:19,115 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T01:52:19,116 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-12-09T01:52:19,119 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T01:52:19,119 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:19,121 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da., hostname=ef6f18c58dc9,36587,1733709128181, seqNum=2] 2024-12-09T01:52:19,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T01:52:19,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T01:52:19,135 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T01:52:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T01:52:19,136 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T01:52:19,138 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T01:52:19,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:19,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:19,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36587 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T01:52:19,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:19,299 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 63be67e37d72759b0b7ada06089b33da 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T01:52:19,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/89c8815813f140d29132b61e3d36b96b is 1080, key is row0001/info:/1733709139123/Put/seqid=0 2024-12-09T01:52:19,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741837_1013 (size=6033) 2024-12-09T01:52:19,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741837_1013 (size=6033) 2024-12-09T01:52:19,322 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/89c8815813f140d29132b61e3d36b96b 2024-12-09T01:52:19,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/89c8815813f140d29132b61e3d36b96b as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/89c8815813f140d29132b61e3d36b96b 2024-12-09T01:52:19,334 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/89c8815813f140d29132b61e3d36b96b, entries=1, 
sequenceid=5, filesize=5.9 K 2024-12-09T01:52:19,335 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63be67e37d72759b0b7ada06089b33da in 36ms, sequenceid=5, compaction requested=false 2024-12-09T01:52:19,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 63be67e37d72759b0b7ada06089b33da: 2024-12-09T01:52:19,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:19,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T01:52:19,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T01:52:19,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T01:52:19,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 203 msec 2024-12-09T01:52:19,346 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 213 msec 2024-12-09T01:52:19,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:20,153 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:20,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:20,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:21,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:21,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:21,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:22,154 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:22,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:22,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:23,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:23,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:23,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:24,155 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:24,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:24,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:25,156 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:25,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:25,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:26,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:26,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:26,976 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:27,157 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:27,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:27,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:28,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:28,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:28,977 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:29,158 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:29,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 after 68048ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:52:29,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T01:52:29,175 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T01:52:29,178 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T01:52:29,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T01:52:29,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-12-09T01:52:29,181 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-12-09T01:52:29,182 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T01:52:29,182 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T01:52:29,219 WARN [Close-WAL-Writer-0 {}] 
util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:29,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta after 68047ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:52:29,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36587 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-12-09T01:52:29,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:29,336 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 63be67e37d72759b0b7ada06089b33da 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T01:52:29,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/3ccafe97d30f44bfb668ec880ad0b673 is 1080, key is row0002/info:/1733709149176/Put/seqid=0 2024-12-09T01:52:29,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741838_1014 (size=6033) 2024-12-09T01:52:29,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741838_1014 (size=6033) 2024-12-09T01:52:29,350 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/3ccafe97d30f44bfb668ec880ad0b673 2024-12-09T01:52:29,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/3ccafe97d30f44bfb668ec880ad0b673 as 
hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/3ccafe97d30f44bfb668ec880ad0b673 2024-12-09T01:52:29,364 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/3ccafe97d30f44bfb668ec880ad0b673, entries=1, sequenceid=9, filesize=5.9 K 2024-12-09T01:52:29,366 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63be67e37d72759b0b7ada06089b33da in 30ms, sequenceid=9, compaction requested=false 2024-12-09T01:52:29,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 63be67e37d72759b0b7ada06089b33da: 2024-12-09T01:52:29,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:29,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-12-09T01:52:29,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-12-09T01:52:29,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T01:52:29,371 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 186 msec 2024-12-09T01:52:29,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 194 msec 2024-12-09T01:52:29,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:30,159 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:30,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:30,978 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:31,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:31,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:31,979 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:32,160 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:32,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:32,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:33,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:33,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:33,980 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:34,161 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:34,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:34,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:35,162 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:35,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:35,981 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:36,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:36,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:36,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:37,163 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:37,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:37,982 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:38,123 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T01:52:38,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:38,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:38,983 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:39,164 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:39,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T01:52:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-12-09T01:52:39,285 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-12-09T01:52:39,288 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C36587%2C1733709128181.1733709159288
2024-12-09T01:52:39,294 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:39,294 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:39,295 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:39,295 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:39,295 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:39,295 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709128576 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709159288
2024-12-09T01:52:39,296 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35567:35567),(127.0.0.1/127.0.0.1:42323:42323)]
2024-12-09T01:52:39,296 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709128576 is not closed yet, will try archiving it next time
2024-12-09T01:52:39,297 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T01:52:39,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741833_1009 (size=5546)
2024-12-09T01:52:39,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741833_1009 (size=5546)
2024-12-09T01:52:39,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T01:52:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-12-09T01:52:39,299 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T01:52:39,300 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
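For context on the flush entries above and below: the test client asks for a flush of TestLogRolling-testCompactionRecordDoesntBlockRolling, and the master turns the request into FlushTableProcedure pid=11 with a FlushRegionProcedure child (pid=12). A minimal, illustrative sketch of how such a flush can be issued through the HBase client API is given here; the class name, configuration, and connection setup are assumptions for the example and are not taken from the test code itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative configuration; a real test would point this at its mini cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Request a flush of the table; the master runs it as a flush procedure
      // and the region server writes the memstore out to a new store file.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}

The log itself uses the asynchronous admin (RawAsyncHBaseAdmin), which issues an equivalent request; either way the region server ends up flushing the memstore to a store file, as recorded in the entries that follow.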
2024-12-09T01:52:39,301 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T01:52:39,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36587 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-12-09T01:52:39,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.
2024-12-09T01:52:39,454 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 63be67e37d72759b0b7ada06089b33da 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T01:52:39,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/e349110a663d4f2493cb1e13ae25d691 is 1080, key is row0003/info:/1733709159286/Put/seqid=0
2024-12-09T01:52:39,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741840_1016 (size=6033)
2024-12-09T01:52:39,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741840_1016 (size=6033)
2024-12-09T01:52:39,464 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/e349110a663d4f2493cb1e13ae25d691
2024-12-09T01:52:39,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/e349110a663d4f2493cb1e13ae25d691 as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/e349110a663d4f2493cb1e13ae25d691
2024-12-09T01:52:39,476 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/e349110a663d4f2493cb1e13ae25d691, entries=1, sequenceid=13, filesize=5.9 K
2024-12-09T01:52:39,477 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for
63be67e37d72759b0b7ada06089b33da in 23ms, sequenceid=13, compaction requested=true 2024-12-09T01:52:39,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 63be67e37d72759b0b7ada06089b33da: 2024-12-09T01:52:39,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:39,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-12-09T01:52:39,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-12-09T01:52:39,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-12-09T01:52:39,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-12-09T01:52:39,485 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 186 msec 2024-12-09T01:52:39,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:40,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:40,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:40,984 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:41,165 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:41,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:41,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:42,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:42,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:42,985 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:43,166 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:43,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:43,986 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:44,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:44,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:44,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:45,167 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:45,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:45,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:46,168 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:46,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:46,987 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:47,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:47,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:47,988 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:48,169 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:48,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:48,983 INFO [master/ef6f18c58dc9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T01:52:48,983 INFO [master/ef6f18c58dc9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T01:52:48,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:49,170 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:49,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:49,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-12-09T01:52:49,396 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T01:52:49,396 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:52:49,397 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:52:49,397 DEBUG [Time-limited test {}] regionserver.HStore(1541): 63be67e37d72759b0b7ada06089b33da/info is initiating minor compaction (all files) 2024-12-09T01:52:49,397 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:52:49,397 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:52:49,398 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 63be67e37d72759b0b7ada06089b33da/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 
2024-12-09T01:52:49,398 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/89c8815813f140d29132b61e3d36b96b, hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/3ccafe97d30f44bfb668ec880ad0b673, hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/e349110a663d4f2493cb1e13ae25d691] into tmpdir=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp, totalSize=17.7 K
2024-12-09T01:52:49,398 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 89c8815813f140d29132b61e3d36b96b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1733709139123
2024-12-09T01:52:49,399 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 3ccafe97d30f44bfb668ec880ad0b673, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1733709149176
2024-12-09T01:52:49,399 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting e349110a663d4f2493cb1e13ae25d691, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733709159286
2024-12-09T01:52:49,410 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 63be67e37d72759b0b7ada06089b33da#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-09T01:52:49,411 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/1180866201904c17bc8a04ec86cb51b9 is 1080, key is row0001/info:/1733709139123/Put/seqid=0
2024-12-09T01:52:49,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741841_1017 (size=8296)
2024-12-09T01:52:49,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741841_1017 (size=8296)
2024-12-09T01:52:49,423 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/1180866201904c17bc8a04ec86cb51b9 as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/1180866201904c17bc8a04ec86cb51b9
2024-12-09T01:52:49,430 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 63be67e37d72759b0b7ada06089b33da/info of 63be67e37d72759b0b7ada06089b33da into 1180866201904c17bc8a04ec86cb51b9(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-09T01:52:49,430 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 63be67e37d72759b0b7ada06089b33da:
2024-12-09T01:52:49,433 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C36587%2C1733709128181.1733709169433
2024-12-09T01:52:49,439 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:49,439 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:49,439 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:49,439 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:49,439 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:52:49,439 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709159288 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709169433
2024-12-09T01:52:49,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741839_1015 (size=2520)
2024-12-09T01:52:49,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741839_1015 (size=2520)
2024-12-09T01:52:49,446 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709128576 to hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/oldWALs/ef6f18c58dc9%2C36587%2C1733709128181.1733709128576
2024-12-09T01:52:49,446 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42323:42323),(127.0.0.1/127.0.0.1:35567:35567)]
2024-12-09T01:52:49,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T01:52:49,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-12-09T01:52:49,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-12-09T01:52:49,449 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-12-09T01:52:49,450 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-09T01:52:49,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-09T01:52:49,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36587 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-12-09T01:52:49,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.
2024-12-09T01:52:49,604 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 63be67e37d72759b0b7ada06089b33da 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-12-09T01:52:49,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/952e686173b4451fbd839f6854d4d113 is 1080, key is row0000/info:/1733709169431/Put/seqid=0
2024-12-09T01:52:49,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741843_1019 (size=6033)
2024-12-09T01:52:49,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741843_1019 (size=6033)
2024-12-09T01:52:49,615 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/952e686173b4451fbd839f6854d4d113
2024-12-09T01:52:49,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/952e686173b4451fbd839f6854d4d113 as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/952e686173b4451fbd839f6854d4d113
2024-12-09T01:52:49,626 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/952e686173b4451fbd839f6854d4d113, entries=1, sequenceid=18, filesize=5.9 K
2024-12-09T01:52:49,627 INFO [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63be67e37d72759b0b7ada06089b33da in 23ms, sequenceid=18, compaction requested=false
2024-12-09T01:52:49,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 63be67e37d72759b0b7ada06089b33da:
2024-12-09T01:52:49,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.
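The flush recorded above (master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush ..., then FlushTableProcedure pid=13 and FlushRegionProcedure pid=14) is the server-side trace of a client asking for a table flush through the Admin API. A minimal client-side sketch follows; it is not taken from this test, and the ZooKeeper quorum value is an assumed setting for a local mini-cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "localhost"); // assumed local test setting
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the master then runs
            // a FlushTableProcedure with one FlushRegionProcedure per region, which is
            // what the pid=13 / pid=14 records in this log show.
            admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
    }
}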
2024-12-09T01:52:49,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-12-09T01:52:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-12-09T01:52:49,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-12-09T01:52:49,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec
2024-12-09T01:52:49,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 185 msec
2024-12-09T01:52:49,989 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481
    [identical InvocationTargetException / "Filesystem closed" stack trace omitted; see the 2024-12-09T01:52:49,170 entry above]
[The same util.RecoverLeaseFSUtils(258) WARN from Close-WAL-Writer-0, each followed by the identical InvocationTargetException / "Filesystem closed" stack trace, repeats roughly once per second from 2024-12-09T01:52:50,170 through 2024-12-09T01:52:57,235, where this excerpt ends, for the same three files under hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/: ef6f18c58dc9%2C45353%2C1733709043006.1733709043481, ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 and ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta. The duplicated entries are omitted; the only other record in this interval is:]
2024-12-09T01:52:54,356 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 63be67e37d72759b0b7ada06089b33da, had cached 0 bytes from a total of 14329
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:57,993 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:58,174 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:58,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:58,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:59,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:59,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:59,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34877 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-12-09T01:52:59,506 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-12-09T01:52:59,508 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C36587%2C1733709128181.1733709179508 2024-12-09T01:52:59,514 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,514 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,514 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,515 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,515 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,515 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709169433 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709179508 2024-12-09T01:52:59,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42323:42323),(127.0.0.1/127.0.0.1:35567:35567)] 2024-12-09T01:52:59,516 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709169433 is not closed 
2024-12-09T01:52:59,516 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/WALs/ef6f18c58dc9,36587,1733709128181/ef6f18c58dc9%2C36587%2C1733709128181.1733709159288 to hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/oldWALs/ef6f18c58dc9%2C36587%2C1733709128181.1733709159288
2024-12-09T01:52:59,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T01:52:59,516 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T01:52:59,516 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T01:52:59,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T01:52:59,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T01:52:59,516 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster
2024-12-09T01:52:59,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741842_1018 (size=2026)
2024-12-09T01:52:59,516 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=224711458, stopped=false
2024-12-09T01:52:59,517 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef6f18c58dc9,34877,1733709128139
2024-12-09T01:52:59,517 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-09T01:52:59,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741842_1018 (size=2026)
2024-12-09T01:52:59,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T01:52:59,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running
2024-12-09T01:52:59,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T01:52:59,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-09T01:52:59,521 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-09T01:52:59,521 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-12-09T01:52:59,521 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-09T01:52:59,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-09T01:52:59,521 INFO [Time-limited test {}]
regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,36587,1733709128181' ***** 2024-12-09T01:52:59,521 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(3091): Received CLOSE for 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:59,522 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:52:59,522 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:52:59,522 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef6f18c58dc9:36587. 2024-12-09T01:52:59,522 DEBUG [RS:0;ef6f18c58dc9:36587 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:52:59,522 DEBUG [RS:0;ef6f18c58dc9:36587 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-09T01:52:59,522 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 63be67e37d72759b0b7ada06089b33da, disabling compactions & flushes 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:52:59,522 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:59,522 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T01:52:59,522 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:59,523 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. after waiting 0 ms 2024-12-09T01:52:59,523 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:59,523 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 63be67e37d72759b0b7ada06089b33da 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-12-09T01:52:59,523 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-09T01:52:59,523 DEBUG [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 63be67e37d72759b0b7ada06089b33da=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.} 2024-12-09T01:52:59,523 DEBUG [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 63be67e37d72759b0b7ada06089b33da 2024-12-09T01:52:59,523 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:52:59,523 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:52:59,523 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:52:59,523 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:52:59,523 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:52:59,523 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): 
Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-12-09T01:52:59,528 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/d2ee9c2ce36044e3a93699812e8525b8 is 1080, key is row0001/info:/1733709179507/Put/seqid=0 2024-12-09T01:52:59,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741845_1021 (size=6033) 2024-12-09T01:52:59,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741845_1021 (size=6033) 2024-12-09T01:52:59,537 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/d2ee9c2ce36044e3a93699812e8525b8 2024-12-09T01:52:59,541 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/.tmp/info/979d2a11810c4edcab1972cdf5b7f81f is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da./info:regioninfo/1733709129371/Put/seqid=0 2024-12-09T01:52:59,544 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/.tmp/info/d2ee9c2ce36044e3a93699812e8525b8 as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/d2ee9c2ce36044e3a93699812e8525b8 2024-12-09T01:52:59,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741846_1022 (size=7308) 2024-12-09T01:52:59,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741846_1022 (size=7308) 2024-12-09T01:52:59,548 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/.tmp/info/979d2a11810c4edcab1972cdf5b7f81f 2024-12-09T01:52:59,550 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/d2ee9c2ce36044e3a93699812e8525b8, entries=1, sequenceid=22, filesize=5.9 K 
2024-12-09T01:52:59,551 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63be67e37d72759b0b7ada06089b33da in 28ms, sequenceid=22, compaction requested=true 2024-12-09T01:52:59,551 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/89c8815813f140d29132b61e3d36b96b, hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/3ccafe97d30f44bfb668ec880ad0b673, hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/e349110a663d4f2493cb1e13ae25d691] to archive 2024-12-09T01:52:59,552 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T01:52:59,554 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/89c8815813f140d29132b61e3d36b96b to hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/89c8815813f140d29132b61e3d36b96b 2024-12-09T01:52:59,556 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/3ccafe97d30f44bfb668ec880ad0b673 to hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/3ccafe97d30f44bfb668ec880ad0b673 2024-12-09T01:52:59,557 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/e349110a663d4f2493cb1e13ae25d691 to hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/info/e349110a663d4f2493cb1e13ae25d691 2024-12-09T01:52:59,558 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=ef6f18c58dc9:34877 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-12-09T01:52:59,558 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [89c8815813f140d29132b61e3d36b96b=6033, 3ccafe97d30f44bfb668ec880ad0b673=6033, e349110a663d4f2493cb1e13ae25d691=6033] 2024-12-09T01:52:59,562 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/63be67e37d72759b0b7ada06089b33da/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-12-09T01:52:59,563 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 2024-12-09T01:52:59,563 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 63be67e37d72759b0b7ada06089b33da: Waiting for close lock at 1733709179522Running coprocessor pre-close hooks at 1733709179522Disabling compacts and flushes for region at 1733709179522Disabling writes for close at 1733709179523 (+1 ms)Obtaining lock to block concurrent updates at 1733709179523Preparing flush snapshotting stores in 63be67e37d72759b0b7ada06089b33da at 1733709179523Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1733709179523Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. at 1733709179524 (+1 ms)Flushing 63be67e37d72759b0b7ada06089b33da/info: creating writer at 1733709179524Flushing 63be67e37d72759b0b7ada06089b33da/info: appending metadata at 1733709179527 (+3 ms)Flushing 63be67e37d72759b0b7ada06089b33da/info: closing flushed file at 1733709179527Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2fceec4d: reopening flushed file at 1733709179543 (+16 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 63be67e37d72759b0b7ada06089b33da in 28ms, sequenceid=22, compaction requested=true at 1733709179551 (+8 ms)Writing region close event to WAL at 1733709179558 (+7 ms)Running coprocessor post-close hooks at 1733709179563 (+5 ms)Closed at 1733709179563 2024-12-09T01:52:59,563 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1733709129012.63be67e37d72759b0b7ada06089b33da. 
2024-12-09T01:52:59,569 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/.tmp/ns/54b7cb9252c148a783f915214f4227ea is 43, key is default/ns:d/1733709128967/Put/seqid=0 2024-12-09T01:52:59,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741847_1023 (size=5153) 2024-12-09T01:52:59,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741847_1023 (size=5153) 2024-12-09T01:52:59,574 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/.tmp/ns/54b7cb9252c148a783f915214f4227ea 2024-12-09T01:52:59,594 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/.tmp/table/ebd639e7d1dc4149b2123a14bdad0019 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1733709129382/Put/seqid=0 2024-12-09T01:52:59,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741848_1024 (size=5508) 2024-12-09T01:52:59,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741848_1024 (size=5508) 2024-12-09T01:52:59,599 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/.tmp/table/ebd639e7d1dc4149b2123a14bdad0019 2024-12-09T01:52:59,605 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/.tmp/info/979d2a11810c4edcab1972cdf5b7f81f as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/info/979d2a11810c4edcab1972cdf5b7f81f 2024-12-09T01:52:59,609 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/info/979d2a11810c4edcab1972cdf5b7f81f, entries=10, sequenceid=11, filesize=7.1 K 2024-12-09T01:52:59,610 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/.tmp/ns/54b7cb9252c148a783f915214f4227ea as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/ns/54b7cb9252c148a783f915214f4227ea 2024-12-09T01:52:59,615 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/ns/54b7cb9252c148a783f915214f4227ea, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T01:52:59,615 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/.tmp/table/ebd639e7d1dc4149b2123a14bdad0019 as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/table/ebd639e7d1dc4149b2123a14bdad0019 2024-12-09T01:52:59,620 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/table/ebd639e7d1dc4149b2123a14bdad0019, entries=2, sequenceid=11, filesize=5.4 K 2024-12-09T01:52:59,621 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false 2024-12-09T01:52:59,625 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T01:52:59,626 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:52:59,626 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:52:59,626 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709179523Running coprocessor pre-close hooks at 1733709179523Disabling compacts and flushes for region at 1733709179523Disabling writes for close at 1733709179523Obtaining lock to block concurrent updates at 1733709179523Preparing flush snapshotting stores in 1588230740 at 1733709179523Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1733709179524 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733709179524Flushing 1588230740/info: creating writer at 1733709179524Flushing 1588230740/info: appending metadata at 1733709179541 (+17 ms)Flushing 1588230740/info: closing flushed file at 1733709179541Flushing 1588230740/ns: creating writer at 1733709179553 (+12 ms)Flushing 1588230740/ns: appending metadata at 1733709179569 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1733709179569Flushing 1588230740/table: creating writer at 1733709179579 (+10 ms)Flushing 1588230740/table: appending metadata at 1733709179593 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733709179593Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@57b3b51f: reopening flushed file at 1733709179604 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7851520a: reopening flushed file at 1733709179609 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@154993d8: reopening flushed file at 1733709179615 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 98ms, sequenceid=11, compaction requested=false at 1733709179621 (+6 ms)Writing region close event to WAL at 1733709179622 (+1 ms)Running coprocessor post-close hooks at 1733709179626 (+4 ms)Closed at 1733709179626 2024-12-09T01:52:59,626 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T01:52:59,723 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,36587,1733709128181; all regions closed. 2024-12-09T01:52:59,724 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,724 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,724 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,724 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,724 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741834_1010 (size=3306) 2024-12-09T01:52:59,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741834_1010 (size=3306) 2024-12-09T01:52:59,729 DEBUG [RS:0;ef6f18c58dc9:36587 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/oldWALs 2024-12-09T01:52:59,729 INFO [RS:0;ef6f18c58dc9:36587 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C36587%2C1733709128181.meta:.meta(num 1733709128928) 2024-12-09T01:52:59,729 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,729 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,729 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,729 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,729 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741844_1020 (size=1252) 2024-12-09T01:52:59,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741844_1020 (size=1252) 2024-12-09T01:52:59,734 DEBUG [RS:0;ef6f18c58dc9:36587 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/oldWALs 2024-12-09T01:52:59,734 INFO [RS:0;ef6f18c58dc9:36587 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C36587%2C1733709128181:(num 1733709179508) 2024-12-09T01:52:59,734 DEBUG [RS:0;ef6f18c58dc9:36587 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:52:59,734 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:52:59,734 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:52:59,734 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T01:52:59,734 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:52:59,734 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T01:52:59,734 INFO [RS:0;ef6f18c58dc9:36587 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36587 2024-12-09T01:52:59,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,36587,1733709128181 2024-12-09T01:52:59,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:52:59,736 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:52:59,738 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,36587,1733709128181] 2024-12-09T01:52:59,739 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,36587,1733709128181 already deleted, retry=false 2024-12-09T01:52:59,739 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,36587,1733709128181 expired; onlineServers=0 2024-12-09T01:52:59,739 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef6f18c58dc9,34877,1733709128139' ***** 2024-12-09T01:52:59,739 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T01:52:59,739 INFO [M:0;ef6f18c58dc9:34877 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:52:59,739 INFO [M:0;ef6f18c58dc9:34877 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:52:59,739 DEBUG [M:0;ef6f18c58dc9:34877 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T01:52:59,739 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T01:52:59,739 DEBUG [M:0;ef6f18c58dc9:34877 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T01:52:59,739 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709128329 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709128329,5,FailOnTimeoutGroup] 2024-12-09T01:52:59,739 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709128329 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709128329,5,FailOnTimeoutGroup] 2024-12-09T01:52:59,739 INFO [M:0;ef6f18c58dc9:34877 {}] hbase.ChoreService(370): Chore service for: master/ef6f18c58dc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T01:52:59,739 INFO [M:0;ef6f18c58dc9:34877 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:52:59,740 DEBUG [M:0;ef6f18c58dc9:34877 {}] master.HMaster(1795): Stopping service threads 2024-12-09T01:52:59,740 INFO [M:0;ef6f18c58dc9:34877 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T01:52:59,740 INFO [M:0;ef6f18c58dc9:34877 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:52:59,740 INFO [M:0;ef6f18c58dc9:34877 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T01:52:59,740 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T01:52:59,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T01:52:59,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:52:59,741 DEBUG [M:0;ef6f18c58dc9:34877 {}] zookeeper.ZKUtil(347): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T01:52:59,741 WARN [M:0;ef6f18c58dc9:34877 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T01:52:59,741 INFO [M:0;ef6f18c58dc9:34877 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/.lastflushedseqids 2024-12-09T01:52:59,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741849_1025 (size=130) 2024-12-09T01:52:59,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741849_1025 (size=130) 2024-12-09T01:52:59,747 INFO [M:0;ef6f18c58dc9:34877 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T01:52:59,747 INFO [M:0;ef6f18c58dc9:34877 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T01:52:59,747 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:52:59,747 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:52:59,748 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:52:59,748 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:52:59,748 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:52:59,748 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.56 KB heapSize=54.94 KB 2024-12-09T01:52:59,771 DEBUG [M:0;ef6f18c58dc9:34877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1abc5b55c68a4428a23318293bf4c7a6 is 82, key is hbase:meta,,1/info:regioninfo/1733709128951/Put/seqid=0 2024-12-09T01:52:59,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741850_1026 (size=5672) 2024-12-09T01:52:59,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741850_1026 (size=5672) 2024-12-09T01:52:59,776 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1abc5b55c68a4428a23318293bf4c7a6 2024-12-09T01:52:59,795 DEBUG [M:0;ef6f18c58dc9:34877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c80256b85f5b41ae90d64da1e369e2ac is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733709129387/Put/seqid=0 2024-12-09T01:52:59,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741851_1027 (size=7819) 2024-12-09T01:52:59,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741851_1027 (size=7819) 2024-12-09T01:52:59,802 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.95 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c80256b85f5b41ae90d64da1e369e2ac 2024-12-09T01:52:59,807 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c80256b85f5b41ae90d64da1e369e2ac 2024-12-09T01:52:59,822 DEBUG [M:0;ef6f18c58dc9:34877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e494bb214b4c485fab99c8c8136ff8b2 is 69, key is ef6f18c58dc9,36587,1733709128181/rs:state/1733709128415/Put/seqid=0 2024-12-09T01:52:59,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741852_1028 (size=5156) 2024-12-09T01:52:59,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741852_1028 (size=5156) 2024-12-09T01:52:59,828 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e494bb214b4c485fab99c8c8136ff8b2 2024-12-09T01:52:59,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:52:59,838 INFO [RS:0;ef6f18c58dc9:36587 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:52:59,838 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36587-0x10074799aa40001, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:52:59,838 INFO [RS:0;ef6f18c58dc9:36587 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,36587,1733709128181; zookeeper connection closed. 2024-12-09T01:52:59,838 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@404a0253 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@404a0253 2024-12-09T01:52:59,838 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T01:52:59,848 DEBUG [M:0;ef6f18c58dc9:34877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a3629a7d07e74ca9866f605e8394ec84 is 52, key is load_balancer_on/state:d/1733709129008/Put/seqid=0 2024-12-09T01:52:59,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741853_1029 (size=5056) 2024-12-09T01:52:59,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741853_1029 (size=5056) 2024-12-09T01:52:59,853 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a3629a7d07e74ca9866f605e8394ec84 2024-12-09T01:52:59,859 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1abc5b55c68a4428a23318293bf4c7a6 as 
hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1abc5b55c68a4428a23318293bf4c7a6 2024-12-09T01:52:59,863 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1abc5b55c68a4428a23318293bf4c7a6, entries=8, sequenceid=121, filesize=5.5 K 2024-12-09T01:52:59,864 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c80256b85f5b41ae90d64da1e369e2ac as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c80256b85f5b41ae90d64da1e369e2ac 2024-12-09T01:52:59,869 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c80256b85f5b41ae90d64da1e369e2ac 2024-12-09T01:52:59,869 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c80256b85f5b41ae90d64da1e369e2ac, entries=14, sequenceid=121, filesize=7.6 K 2024-12-09T01:52:59,870 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e494bb214b4c485fab99c8c8136ff8b2 as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e494bb214b4c485fab99c8c8136ff8b2 2024-12-09T01:52:59,874 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e494bb214b4c485fab99c8c8136ff8b2, entries=1, sequenceid=121, filesize=5.0 K 2024-12-09T01:52:59,875 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a3629a7d07e74ca9866f605e8394ec84 as hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a3629a7d07e74ca9866f605e8394ec84 2024-12-09T01:52:59,879 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35509/user/jenkins/test-data/f046a980-5110-7c54-86d4-3efaf8ee96f2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a3629a7d07e74ca9866f605e8394ec84, entries=1, sequenceid=121, filesize=4.9 K 2024-12-09T01:52:59,881 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.56 KB/44602, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=121, compaction requested=false 2024-12-09T01:52:59,884 INFO [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T01:52:59,884 DEBUG [M:0;ef6f18c58dc9:34877 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709179747Disabling compacts and flushes for region at 1733709179747Disabling writes for close at 1733709179748 (+1 ms)Obtaining lock to block concurrent updates at 1733709179748Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733709179748Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44602, getHeapSize=56192, getOffHeapSize=0, getCellsCount=140 at 1733709179748Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733709179749 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733709179749Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733709179770 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733709179771 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733709179781 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733709179795 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733709179795Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733709179807 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733709179821 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733709179821Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733709179833 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733709179847 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733709179847Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@177dc1c: reopening flushed file at 1733709179858 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3d8e7121: reopening flushed file at 1733709179864 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27febaee: reopening flushed file at 1733709179869 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7e5b81d9: reopening flushed file at 1733709179874 (+5 ms)Finished flush of dataSize ~43.56 KB/44602, heapSize ~54.88 KB/56192, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=121, compaction requested=false at 1733709179881 (+7 ms)Writing region close event to WAL at 1733709179884 (+3 ms)Closed at 1733709179884 2024-12-09T01:52:59,885 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,885 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,885 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,885 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,885 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:52:59,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38611 is added to blk_1073741830_1006 (size=52999) 2024-12-09T01:52:59,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38615 is added to blk_1073741830_1006 (size=52999) 2024-12-09T01:52:59,888 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:52:59,888 INFO [M:0;ef6f18c58dc9:34877 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T01:52:59,888 INFO [M:0;ef6f18c58dc9:34877 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34877 2024-12-09T01:52:59,888 INFO [M:0;ef6f18c58dc9:34877 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:52:59,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:52:59,990 INFO [M:0;ef6f18c58dc9:34877 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:52:59,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34877-0x10074799aa40000, quorum=127.0.0.1:60018, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:52:59,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@fc981fd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:52:59,993 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1367dc96{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:52:59,993 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:52:59,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@190ad9e7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:52:59,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20f59884{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.log.dir/,STOPPED} 2024-12-09T01:52:59,994 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:52:59,995 WARN [BP-836402380-172.17.0.2-1733709127439 heartbeating to localhost/127.0.0.1:35509 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:52:59,995 WARN [BP-836402380-172.17.0.2-1733709127439 heartbeating to localhost/127.0.0.1:35509 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-836402380-172.17.0.2-1733709127439 (Datanode Uuid cdf93d58-db8a-458f-9997-47e50d19a7ce) service to localhost/127.0.0.1:35509 2024-12-09T01:52:59,995 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T01:52:59,995 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:52:59,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/data/data3/current/BP-836402380-172.17.0.2-1733709127439 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:52:59,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/data/data4/current/BP-836402380-172.17.0.2-1733709127439 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:52:59,996 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:52:59,998 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@48bfafbe{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:52:59,998 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b93cee9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:52:59,998 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:52:59,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@5b5b6aa1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:52:59,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e6ee234{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.log.dir/,STOPPED} 2024-12-09T01:53:00,000 WARN [BP-836402380-172.17.0.2-1733709127439 heartbeating to localhost/127.0.0.1:35509 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:53:00,000 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-09T01:53:00,000 WARN [BP-836402380-172.17.0.2-1733709127439 heartbeating to localhost/127.0.0.1:35509 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-836402380-172.17.0.2-1733709127439 (Datanode Uuid 1b05c82e-8175-498d-8ea0-b0f1901cc57e) service to localhost/127.0.0.1:35509 2024-12-09T01:53:00,000 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:53:00,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/data/data1/current/BP-836402380-172.17.0.2-1733709127439 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:53:00,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/cluster_ca30ed38-7fe8-0dd8-9f41-cfcd317b33b1/data/data2/current/BP-836402380-172.17.0.2-1733709127439 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:53:00,001 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:53:00,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62558ec9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:53:00,008 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@294894f5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:53:00,008 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:53:00,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@290f188f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:53:00,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70e475c9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.log.dir/,STOPPED} 2024-12-09T01:53:00,016 INFO [Time-limited test {}] 
zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T01:53:00,033 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T01:53:00,043 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=208 (was 181) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35509 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/ef6f18c58dc9:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:35509 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35509 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35509 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35509 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35509 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:35509 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35509 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35509 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=186 (was 220), ProcessCount=11 (was 11), AvailableMemoryMB=11626 (was 11695) 2024-12-09T01:53:00,052 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=208, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=186, ProcessCount=11, AvailableMemoryMB=11627 2024-12-09T01:53:00,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.log.dir so I do NOT create it in target/test-data/ed067f6f-7154-9227-f200-454673d252f7 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cec41315-0b26-4262-8c4b-b16ffa494d0a/hadoop.tmp.dir so I do NOT create it in target/test-data/ed067f6f-7154-9227-f200-454673d252f7 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa, deleteOnExit=true 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/test.cache.data in system properties and HBase conf 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.log.dir in system properties and HBase conf 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T01:53:00,053 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:53:00,053 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/nfs.dump.dir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/java.io.tmpdir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T01:53:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T01:53:00,067 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:53:00,134 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:53:00,138 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:53:00,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:53:00,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:53:00,139 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:53:00,140 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:53:00,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@240fc28c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:53:00,140 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@314bb674{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:53:00,175 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:00,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:00,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25570184{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/java.io.tmpdir/jetty-localhost-39411-hadoop-hdfs-3_4_1-tests_jar-_-any-10027881462408822142/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:53:00,254 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@148b0695{HTTP/1.1, (http/1.1)}{localhost:39411} 2024-12-09T01:53:00,254 INFO [Time-limited test {}] server.Server(415): Started @242991ms 2024-12-09T01:53:00,266 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:53:00,333 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:53:00,337 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:53:00,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:53:00,338 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:53:00,338 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:53:00,338 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d639fc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:53:00,339 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50f209dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:53:00,432 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:53:00,453 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5411f427{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/java.io.tmpdir/jetty-localhost-34943-hadoop-hdfs-3_4_1-tests_jar-_-any-869932100432578181/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:53:00,454 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10e59f6e{HTTP/1.1, (http/1.1)}{localhost:34943} 2024-12-09T01:53:00,454 INFO [Time-limited test {}] server.Server(415): Started @243191ms 2024-12-09T01:53:00,455 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
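The two InvocationTargetException traces above come from RecoverLeaseFSUtils probing whether a WAL file is already closed; the probe goes through reflection, so when the mini cluster's DFSClient has already been shut down the real failure ("Filesystem closed") only appears as the exception's cause. The following is a minimal illustrative Java sketch of that reflective-probe pattern, not the HBase source; the class name and error handling here are hypothetical.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  // Returns true if the filesystem reports the file as closed; returns false
  // if the method is missing (not an HDFS filesystem) or the probe fails,
  // e.g. with an InvocationTargetException whose cause is
  // IOException("Filesystem closed"), as in the log above.
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // isFileClosed(Path) exists on DistributedFileSystem but not on the
      // generic FileSystem API, hence the reflective lookup.
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException | IllegalAccessException e) {
      return false;
    } catch (InvocationTargetException e) {
      // The underlying failure is wrapped; unwrap it for logging.
      System.err.println("isFileClosed probe failed: " + e.getCause());
      return false;
    }
  }

  private IsFileClosedProbe() {
  }
}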
2024-12-09T01:53:00,484 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:53:00,486 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:53:00,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:53:00,487 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:53:00,487 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:53:00,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@314e7370{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:53:00,488 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@425d5d71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:53:00,560 WARN [Thread-1970 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/data/data1/current/BP-352443598-172.17.0.2-1733709180084/current, will proceed with Du for space computation calculation, 2024-12-09T01:53:00,560 WARN [Thread-1971 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/data/data2/current/BP-352443598-172.17.0.2-1733709180084/current, will proceed with Du for space computation calculation, 2024-12-09T01:53:00,582 WARN [Thread-1949 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:53:00,584 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea998e26917c0de0 with lease ID 0x6ec08018252daa32: Processing first storage report for DS-634fe089-54b8-41f0-8ac7-3268393258cf from datanode DatanodeRegistration(127.0.0.1:34061, datanodeUuid=b8070491-daa1-45cf-b793-5fca58ffabad, infoPort=38505, infoSecurePort=0, ipcPort=39341, storageInfo=lv=-57;cid=testClusterID;nsid=465482464;c=1733709180084) 2024-12-09T01:53:00,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea998e26917c0de0 with lease ID 0x6ec08018252daa32: from storage DS-634fe089-54b8-41f0-8ac7-3268393258cf node DatanodeRegistration(127.0.0.1:34061, datanodeUuid=b8070491-daa1-45cf-b793-5fca58ffabad, infoPort=38505, infoSecurePort=0, ipcPort=39341, storageInfo=lv=-57;cid=testClusterID;nsid=465482464;c=1733709180084), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:53:00,585 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea998e26917c0de0 with lease ID 0x6ec08018252daa32: Processing first storage report for DS-f85ba438-ea7f-483b-9beb-927eead798a7 from datanode DatanodeRegistration(127.0.0.1:34061, datanodeUuid=b8070491-daa1-45cf-b793-5fca58ffabad, infoPort=38505, infoSecurePort=0, ipcPort=39341, storageInfo=lv=-57;cid=testClusterID;nsid=465482464;c=1733709180084) 2024-12-09T01:53:00,585 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea998e26917c0de0 with lease ID 0x6ec08018252daa32: from storage DS-f85ba438-ea7f-483b-9beb-927eead798a7 node DatanodeRegistration(127.0.0.1:34061, datanodeUuid=b8070491-daa1-45cf-b793-5fca58ffabad, infoPort=38505, infoSecurePort=0, ipcPort=39341, storageInfo=lv=-57;cid=testClusterID;nsid=465482464;c=1733709180084), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:53:00,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1d7cc900{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/java.io.tmpdir/jetty-localhost-45465-hadoop-hdfs-3_4_1-tests_jar-_-any-5350987508070972524/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:53:00,622 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@20d96a0d{HTTP/1.1, (http/1.1)}{localhost:45465} 2024-12-09T01:53:00,622 INFO [Time-limited test {}] server.Server(415): Started @243359ms 2024-12-09T01:53:00,623 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
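At this point both DataNodes of the requested topology have registered and sent their first block reports, which completes the DFS half of the mini-cluster startup logged above (StartMiniClusterOption with 1 master, 1 region server, 2 datanodes, 1 ZK server). As a rough illustration only, a test typically requests that topology along the following lines; the exact HBaseTestingUtil/StartMiniClusterOption signatures are an assumption for the version in use.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)     // two DataNodes, as in the block reports above
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZK, then the HBase cluster
    try {
      // ... test logic against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();    // tears everything down again
    }
  }
}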
2024-12-09T01:53:00,716 WARN [Thread-1997 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/data/data4/current/BP-352443598-172.17.0.2-1733709180084/current, will proceed with Du for space computation calculation, 2024-12-09T01:53:00,716 WARN [Thread-1996 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/data/data3/current/BP-352443598-172.17.0.2-1733709180084/current, will proceed with Du for space computation calculation, 2024-12-09T01:53:00,733 WARN [Thread-1985 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T01:53:00,735 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf160c7fe753707b2 with lease ID 0x6ec08018252daa33: Processing first storage report for DS-fd457290-37e2-491f-a853-742243fb2c0c from datanode DatanodeRegistration(127.0.0.1:42201, datanodeUuid=09a3cf37-e459-4f33-ab4e-6bff9d90af3a, infoPort=33151, infoSecurePort=0, ipcPort=35615, storageInfo=lv=-57;cid=testClusterID;nsid=465482464;c=1733709180084) 2024-12-09T01:53:00,735 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf160c7fe753707b2 with lease ID 0x6ec08018252daa33: from storage DS-fd457290-37e2-491f-a853-742243fb2c0c node DatanodeRegistration(127.0.0.1:42201, datanodeUuid=09a3cf37-e459-4f33-ab4e-6bff9d90af3a, infoPort=33151, infoSecurePort=0, ipcPort=35615, storageInfo=lv=-57;cid=testClusterID;nsid=465482464;c=1733709180084), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:53:00,735 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf160c7fe753707b2 with lease ID 0x6ec08018252daa33: Processing first storage report for DS-c1390eaf-a394-4adf-8e5e-f07a71faac32 from datanode DatanodeRegistration(127.0.0.1:42201, datanodeUuid=09a3cf37-e459-4f33-ab4e-6bff9d90af3a, infoPort=33151, infoSecurePort=0, ipcPort=35615, storageInfo=lv=-57;cid=testClusterID;nsid=465482464;c=1733709180084) 2024-12-09T01:53:00,735 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf160c7fe753707b2 with lease ID 0x6ec08018252daa33: from storage DS-c1390eaf-a394-4adf-8e5e-f07a71faac32 node DatanodeRegistration(127.0.0.1:42201, datanodeUuid=09a3cf37-e459-4f33-ab4e-6bff9d90af3a, infoPort=33151, infoSecurePort=0, ipcPort=35615, storageInfo=lv=-57;cid=testClusterID;nsid=465482464;c=1733709180084), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:53:00,745 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7 2024-12-09T01:53:00,748 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/zookeeper_0, clientPort=54887, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T01:53:00,749 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54887 2024-12-09T01:53:00,749 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:53:00,750 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:53:00,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:53:00,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:53:00,761 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5 with version=8 2024-12-09T01:53:00,761 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/hbase-staging 2024-12-09T01:53:00,763 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:53:00,763 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:53:00,763 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:53:00,763 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:53:00,763 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:53:00,763 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:53:00,763 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T01:53:00,763 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:53:00,764 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33543 2024-12-09T01:53:00,765 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33543 connecting to ZooKeeper ensemble=127.0.0.1:54887 2024-12-09T01:53:00,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:335430x0, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:53:00,771 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33543-0x100747a68320000 connected 2024-12-09T01:53:00,785 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:53:00,786 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:53:00,788 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:53:00,788 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5, hbase.cluster.distributed=false 2024-12-09T01:53:00,790 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:53:00,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33543 2024-12-09T01:53:00,790 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33543 2024-12-09T01:53:00,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33543 2024-12-09T01:53:00,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33543 2024-12-09T01:53:00,791 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33543 2024-12-09T01:53:00,806 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:53:00,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:53:00,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:53:00,806 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:53:00,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:53:00,806 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:53:00,806 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:53:00,806 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:53:00,807 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34803 2024-12-09T01:53:00,808 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34803 connecting to ZooKeeper ensemble=127.0.0.1:54887 2024-12-09T01:53:00,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:53:00,810 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:53:00,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348030x0, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:53:00,814 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:348030x0, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:53:00,814 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34803-0x100747a68320001 connected 2024-12-09T01:53:00,815 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:53:00,815 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:53:00,816 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:53:00,817 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:53:00,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34803 2024-12-09T01:53:00,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34803 2024-12-09T01:53:00,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34803 2024-12-09T01:53:00,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34803 2024-12-09T01:53:00,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34803 2024-12-09T01:53:00,830 
DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef6f18c58dc9:33543 2024-12-09T01:53:00,830 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef6f18c58dc9,33543,1733709180763 2024-12-09T01:53:00,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:53:00,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:53:00,832 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef6f18c58dc9,33543,1733709180763 2024-12-09T01:53:00,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:53:00,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,834 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:53:00,835 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef6f18c58dc9,33543,1733709180763 from backup master directory 2024-12-09T01:53:00,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef6f18c58dc9,33543,1733709180763 2024-12-09T01:53:00,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:53:00,836 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
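The repeated "Set watcher on znode that does not yet exist" lines above describe a standard ZooKeeper idiom: an exists() call registers a watch even when the node is absent, so the region server learns about /hbase/master the moment the active master creates it. A minimal, self-contained sketch of that idiom with the plain ZooKeeper client follows; the ensemble address and paths are copied from the log, everything else is illustrative.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class WatchMissingZNode {
  public static void main(String[] args) throws Exception {
    Watcher watcher = (WatchedEvent event) ->
        System.out.println("event " + event.getType() + " on " + event.getPath());

    // The session-level watcher also receives connection state changes
    // (the "type=None, state=SyncConnected" events seen in the log).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54887", 30_000, watcher);

    // exists() sets the watch even if the node is missing; the watcher then
    // fires with NodeCreated once an active master writes /hbase/master.
    Stat stat = zk.exists("/hbase/master", watcher);
    System.out.println(stat == null ? "znode not there yet, watch set" : "znode exists");
  }
}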
2024-12-09T01:53:00,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:53:00,836 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef6f18c58dc9,33543,1733709180763 2024-12-09T01:53:00,840 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/hbase.id] with ID: 74a4ddeb-8a4f-41aa-a798-20ac500141af 2024-12-09T01:53:00,840 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/.tmp/hbase.id 2024-12-09T01:53:00,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:53:00,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:53:00,847 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/.tmp/hbase.id]:[hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/hbase.id] 2024-12-09T01:53:00,857 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:53:00,857 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T01:53:00,858 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
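The cluster ID lines above show the write-to-temporary-then-rename pattern: the ID is written to .tmp/hbase.id and only then moved to hbase.id, so no reader ever observes a half-written file. A short illustrative sketch of that pattern with the Hadoop FileSystem API follows; it is not the FSUtils code, and the paths and UUID are hypothetical.

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PublishClusterId {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());

    Path tmp = new Path("/hbase/.tmp/hbase.id");   // temporary location
    Path target = new Path("/hbase/hbase.id");     // final, visible location

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeUTF(UUID.randomUUID().toString());  // write the cluster ID
    }
    // rename() is atomic within an HDFS filesystem, so readers see either the
    // old state or the complete new file, never a partial hbase.id.
    if (!fs.rename(tmp, target)) {
      throw new IOException("could not move " + tmp + " to " + target);
    }
  }
}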
2024-12-09T01:53:00,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:53:00,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:53:00,866 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:53:00,867 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T01:53:00,867 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:53:00,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:53:00,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:53:00,874 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store 2024-12-09T01:53:00,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:53:00,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:53:00,880 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:53:00,880 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:53:00,880 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:53:00,880 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:53:00,880 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:53:00,880 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:53:00,880 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
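The descriptor dump above for the local 'master:store' region lists the column-family settings in their serialized form. For orientation, the sketch below builds a descriptor for just the in-memory 'info' family with the values shown in the log (3 versions, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL blooms) using the public HBase client builders; it is only an illustration of how such a descriptor is expressed in code, not the MasterRegion implementation, and the table name handling is an assumption.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                   // VERSIONS => '3'
        .setInMemory(true)                                   // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8 KB
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))    // 'master:store'
        .setColumnFamily(info)
        .build();
  }
}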
2024-12-09T01:53:00,880 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709180880Disabling compacts and flushes for region at 1733709180880Disabling writes for close at 1733709180880Writing region close event to WAL at 1733709180880Closed at 1733709180880 2024-12-09T01:53:00,881 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/.initializing 2024-12-09T01:53:00,881 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/WALs/ef6f18c58dc9,33543,1733709180763 2024-12-09T01:53:00,883 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C33543%2C1733709180763, suffix=, logDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/WALs/ef6f18c58dc9,33543,1733709180763, archiveDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/oldWALs, maxLogs=10 2024-12-09T01:53:00,884 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C33543%2C1733709180763.1733709180884 2024-12-09T01:53:00,888 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/WALs/ef6f18c58dc9,33543,1733709180763/ef6f18c58dc9%2C33543%2C1733709180763.1733709180884 2024-12-09T01:53:00,889 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38505:38505),(127.0.0.1/127.0.0.1:33151:33151)] 2024-12-09T01:53:00,889 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:53:00,889 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:53:00,889 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,889 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,891 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T01:53:00,892 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:00,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:53:00,892 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,893 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T01:53:00,893 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:00,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:53:00,894 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T01:53:00,895 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:00,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:53:00,895 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T01:53:00,896 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:00,896 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:53:00,897 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,897 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,897 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,898 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,898 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,899 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T01:53:00,900 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:53:00,902 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:53:00,902 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857270, jitterRate=0.09007640182971954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:53:00,903 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733709180890Initializing all the Stores at 1733709180890Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709180890Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709180890Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709180890Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709180890Cleaning up temporary data from old regions at 1733709180899 (+9 ms)Region opened successfully at 1733709180903 (+4 ms) 2024-12-09T01:53:00,903 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T01:53:00,905 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e6b3f3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:53:00,929 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T01:53:00,929 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T01:53:00,929 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T01:53:00,929 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T01:53:00,930 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T01:53:00,930 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T01:53:00,930 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T01:53:00,932 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T01:53:00,933 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T01:53:00,935 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T01:53:00,935 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T01:53:00,936 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T01:53:00,937 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T01:53:00,937 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T01:53:00,938 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T01:53:00,939 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T01:53:00,940 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T01:53:00,942 DEBUG 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T01:53:00,943 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T01:53:00,945 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T01:53:00,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:53:00,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:53:00,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,947 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef6f18c58dc9,33543,1733709180763, sessionid=0x100747a68320000, setting cluster-up flag (Was=false) 2024-12-09T01:53:00,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,955 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T01:53:00,956 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,33543,1733709180763 2024-12-09T01:53:00,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:00,964 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T01:53:00,965 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,33543,1733709180763 2024-12-09T01:53:00,966 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T01:53:00,968 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T01:53:00,968 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T01:53:00,969 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T01:53:00,969 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef6f18c58dc9,33543,1733709180763 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T01:53:00,970 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:53:00,970 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:53:00,970 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:53:00,970 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:53:00,970 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef6f18c58dc9:0, corePoolSize=10, maxPoolSize=10 2024-12-09T01:53:00,970 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:00,970 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:53:00,970 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T01:53:00,971 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733709210971 2024-12-09T01:53:00,971 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T01:53:00,971 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T01:53:00,971 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T01:53:00,972 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T01:53:00,972 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T01:53:00,972 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T01:53:00,972 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:53:00,972 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T01:53:00,972 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T01:53:00,973 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T01:53:00,973 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T01:53:00,973 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T01:53:00,973 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:00,973 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T01:53:00,973 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T01:53:00,973 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T01:53:00,974 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709180973,5,FailOnTimeoutGroup] 2024-12-09T01:53:00,974 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709180974,5,FailOnTimeoutGroup] 2024-12-09T01:53:00,974 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:00,974 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T01:53:00,974 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:00,974 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:53:00,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:53:00,980 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T01:53:00,980 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5 2024-12-09T01:53:00,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:53:00,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:53:00,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:53:00,987 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:53:00,989 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:53:00,989 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:00,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:53:00,989 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:53:00,990 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:53:00,990 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:00,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:53:00,991 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:53:00,992 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:53:00,992 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:00,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:53:00,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:53:00,993 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:53:00,993 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:00,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:53:00,994 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:53:00,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740 2024-12-09T01:53:00,995 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740 2024-12-09T01:53:00,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:00,996 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:53:00,996 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:53:00,997 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T01:53:00,998 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:53:01,002 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:53:01,002 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835134, jitterRate=0.06192895770072937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:53:01,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733709180986Initializing all the Stores at 1733709180987 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709180987Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709180987Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709180987Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709180987Cleaning up temporary data from old regions at 1733709180996 (+9 ms)Region opened successfully at 1733709181003 (+7 ms) 2024-12-09T01:53:01,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:53:01,003 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:53:01,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:53:01,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:53:01,003 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:53:01,004 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:53:01,004 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709181003Disabling compacts and flushes for region at 1733709181003Disabling writes for close at 1733709181003Writing region close 
event to WAL at 1733709181004 (+1 ms)Closed at 1733709181004 2024-12-09T01:53:01,005 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:53:01,005 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T01:53:01,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T01:53:01,007 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:53:01,008 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T01:53:01,030 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(746): ClusterId : 74a4ddeb-8a4f-41aa-a798-20ac500141af 2024-12-09T01:53:01,030 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:53:01,032 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:53:01,032 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:53:01,035 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:53:01,035 DEBUG [RS:0;ef6f18c58dc9:34803 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bb570d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:53:01,048 DEBUG [RS:0;ef6f18c58dc9:34803 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef6f18c58dc9:34803 2024-12-09T01:53:01,048 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:53:01,048 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:53:01,048 DEBUG [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T01:53:01,048 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,33543,1733709180763 with port=34803, startcode=1733709180806 2024-12-09T01:53:01,049 DEBUG [RS:0;ef6f18c58dc9:34803 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:53:01,051 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43051, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:53:01,051 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33543 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,051 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33543 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,053 DEBUG [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5 2024-12-09T01:53:01,053 DEBUG [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36289 2024-12-09T01:53:01,053 DEBUG [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:53:01,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:53:01,056 DEBUG [RS:0;ef6f18c58dc9:34803 {}] zookeeper.ZKUtil(111): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,056 WARN [RS:0;ef6f18c58dc9:34803 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:53:01,056 INFO [RS:0;ef6f18c58dc9:34803 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:53:01,056 DEBUG [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,056 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,34803,1733709180806] 2024-12-09T01:53:01,059 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:53:01,061 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:53:01,061 INFO [RS:0;ef6f18c58dc9:34803 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:53:01,061 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T01:53:01,061 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:53:01,062 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:53:01,062 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,062 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,062 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:53:01,063 DEBUG [RS:0;ef6f18c58dc9:34803 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:53:01,063 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T01:53:01,063 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,063 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,063 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,063 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,063 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34803,1733709180806-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:53:01,078 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:53:01,079 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34803,1733709180806-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,079 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,079 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.Replication(171): ef6f18c58dc9,34803,1733709180806 started 2024-12-09T01:53:01,093 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,093 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,34803,1733709180806, RpcServer on ef6f18c58dc9/172.17.0.2:34803, sessionid=0x100747a68320001 2024-12-09T01:53:01,093 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:53:01,093 DEBUG [RS:0;ef6f18c58dc9:34803 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,093 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,34803,1733709180806' 2024-12-09T01:53:01,093 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:53:01,094 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:53:01,094 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:53:01,094 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:53:01,094 DEBUG [RS:0;ef6f18c58dc9:34803 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,094 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,34803,1733709180806' 2024-12-09T01:53:01,094 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:53:01,094 DEBUG 
[RS:0;ef6f18c58dc9:34803 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:53:01,095 DEBUG [RS:0;ef6f18c58dc9:34803 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:53:01,095 INFO [RS:0;ef6f18c58dc9:34803 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:53:01,095 INFO [RS:0;ef6f18c58dc9:34803 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T01:53:01,158 WARN [ef6f18c58dc9:33543 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T01:53:01,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:01,197 INFO [RS:0;ef6f18c58dc9:34803 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C34803%2C1733709180806, suffix=, logDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806, archiveDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/oldWALs, maxLogs=32 2024-12-09T01:53:01,197 INFO [RS:0;ef6f18c58dc9:34803 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C34803%2C1733709180806.1733709181197 2024-12-09T01:53:01,203 INFO [RS:0;ef6f18c58dc9:34803 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.1733709181197 2024-12-09T01:53:01,204 DEBUG [RS:0;ef6f18c58dc9:34803 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33151:33151),(127.0.0.1/127.0.0.1:38505:38505)] 2024-12-09T01:53:01,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:01,408 DEBUG [ef6f18c58dc9:33543 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T01:53:01,409 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,410 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,34803,1733709180806, state=OPENING 2024-12-09T01:53:01,412 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T01:53:01,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:01,414 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:53:01,414 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:53:01,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:53:01,415 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:53:01,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,34803,1733709180806}] 2024-12-09T01:53:01,568 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:53:01,570 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49565, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:53:01,573 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T01:53:01,573 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:53:01,575 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C34803%2C1733709180806.meta, suffix=.meta, logDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806, archiveDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/oldWALs, maxLogs=32 2024-12-09T01:53:01,575 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C34803%2C1733709180806.meta.1733709181575.meta 2024-12-09T01:53:01,583 INFO 
[RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.meta.1733709181575.meta 2024-12-09T01:53:01,584 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33151:33151),(127.0.0.1/127.0.0.1:38505:38505)] 2024-12-09T01:53:01,585 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:53:01,585 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T01:53:01,585 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T01:53:01,585 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T01:53:01,585 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T01:53:01,585 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:53:01,586 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T01:53:01,586 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T01:53:01,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:53:01,588 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:53:01,588 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:01,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:53:01,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:53:01,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:53:01,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:01,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:53:01,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:53:01,590 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:53:01,590 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:01,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:53:01,590 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:53:01,591 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:53:01,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:01,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:53:01,591 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:53:01,592 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740 2024-12-09T01:53:01,593 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740 2024-12-09T01:53:01,594 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:53:01,594 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:53:01,594 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T01:53:01,596 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:53:01,596 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=782202, jitterRate=-0.005379512906074524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:53:01,596 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T01:53:01,597 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733709181586Writing region info on filesystem at 1733709181586Initializing all the Stores at 1733709181586Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709181586Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709181587 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709181587Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709181587Cleaning up temporary data from old regions at 1733709181594 (+7 ms)Running coprocessor post-open hooks at 1733709181596 (+2 ms)Region opened successfully at 1733709181597 (+1 ms) 2024-12-09T01:53:01,598 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733709181567 2024-12-09T01:53:01,600 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T01:53:01,600 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T01:53:01,601 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,602 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,34803,1733709180806, state=OPEN 2024-12-09T01:53:01,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:53:01,607 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:53:01,607 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:53:01,607 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:53:01,607 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T01:53:01,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,34803,1733709180806 in 193 msec 2024-12-09T01:53:01,612 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T01:53:01,612 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 605 msec 2024-12-09T01:53:01,613 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:53:01,613 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T01:53:01,614 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:53:01,614 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,34803,1733709180806, seqNum=-1] 2024-12-09T01:53:01,614 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:53:01,616 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60459, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:53:01,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 652 msec 2024-12-09T01:53:01,621 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733709181621, completionTime=-1 2024-12-09T01:53:01,621 INFO 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T01:53:01,621 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T01:53:01,623 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T01:53:01,623 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733709241623 2024-12-09T01:53:01,623 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733709301623 2024-12-09T01:53:01,623 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T01:53:01,623 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33543,1733709180763-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,623 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33543,1733709180763-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,624 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33543,1733709180763-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,624 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef6f18c58dc9:33543, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,624 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,624 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,625 DEBUG [master/ef6f18c58dc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T01:53:01,627 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.790sec 2024-12-09T01:53:01,627 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T01:53:01,628 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T01:53:01,628 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T01:53:01,628 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T01:53:01,628 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T01:53:01,628 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33543,1733709180763-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:53:01,628 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33543,1733709180763-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T01:53:01,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5773e0ea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:53:01,630 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef6f18c58dc9,33543,-1 for getting cluster id 2024-12-09T01:53:01,630 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T01:53:01,631 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T01:53:01,631 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T01:53:01,631 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33543,1733709180763-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:53:01,632 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '74a4ddeb-8a4f-41aa-a798-20ac500141af' 2024-12-09T01:53:01,632 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T01:53:01,632 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "74a4ddeb-8a4f-41aa-a798-20ac500141af" 2024-12-09T01:53:01,632 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75642934, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:53:01,632 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef6f18c58dc9,33543,-1] 2024-12-09T01:53:01,633 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T01:53:01,633 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:53:01,634 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48978, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T01:53:01,635 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ab86f9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:53:01,635 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:53:01,636 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,34803,1733709180806, seqNum=-1] 2024-12-09T01:53:01,636 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:53:01,637 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47262, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:53:01,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ef6f18c58dc9,33543,1733709180763 2024-12-09T01:53:01,639 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:53:01,641 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T01:53:01,641 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T01:53:01,642 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is ef6f18c58dc9,33543,1733709180763 2024-12-09T01:53:01,642 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7bd08b43 2024-12-09T01:53:01,642 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T01:53:01,643 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48988, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T01:53:01,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33543 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-12-09T01:53:01,644 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33543 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-12-09T01:53:01,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33543 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:53:01,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33543 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-12-09T01:53:01,646 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T01:53:01,646 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:01,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33543 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-12-09T01:53:01,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:53:01,647 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T01:53:01,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741835_1011 (size=381) 2024-12-09T01:53:01,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741835_1011 (size=381) 2024-12-09T01:53:01,655 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 016b1ee3f2107d7f0bb81894adf8f707, NAME => 'TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5 2024-12-09T01:53:01,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741836_1012 (size=64) 2024-12-09T01:53:01,661 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; 
hotProtect now disable 2024-12-09T01:53:01,662 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 016b1ee3f2107d7f0bb81894adf8f707, disabling compactions & flushes 2024-12-09T01:53:01,662 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 2024-12-09T01:53:01,662 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 2024-12-09T01:53:01,662 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. after waiting 0 ms 2024-12-09T01:53:01,662 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 2024-12-09T01:53:01,662 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 2024-12-09T01:53:01,662 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 016b1ee3f2107d7f0bb81894adf8f707: Waiting for close lock at 1733709181662Disabling compacts and flushes for region at 1733709181662Disabling writes for close at 1733709181662Writing region close event to WAL at 1733709181662Closed at 1733709181662 2024-12-09T01:53:01,663 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T01:53:01,664 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733709181663"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733709181663"}]},"ts":"1733709181663"} 2024-12-09T01:53:01,666 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T01:53:01,667 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T01:53:01,667 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709181667"}]},"ts":"1733709181667"} 2024-12-09T01:53:01,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741836_1012 (size=64) 2024-12-09T01:53:01,669 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-12-09T01:53:01,669 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=016b1ee3f2107d7f0bb81894adf8f707, ASSIGN}] 2024-12-09T01:53:01,670 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=016b1ee3f2107d7f0bb81894adf8f707, ASSIGN 2024-12-09T01:53:01,671 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=016b1ee3f2107d7f0bb81894adf8f707, ASSIGN; state=OFFLINE, location=ef6f18c58dc9,34803,1733709180806; forceNewPlan=false, retain=false 2024-12-09T01:53:01,822 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=016b1ee3f2107d7f0bb81894adf8f707, regionState=OPENING, regionLocation=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=016b1ee3f2107d7f0bb81894adf8f707, ASSIGN because future has completed 2024-12-09T01:53:01,825 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806}] 2024-12-09T01:53:01,982 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 
2024-12-09T01:53:01,982 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 016b1ee3f2107d7f0bb81894adf8f707, NAME => 'TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:53:01,982 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,982 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:53:01,982 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,982 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,983 INFO [StoreOpener-016b1ee3f2107d7f0bb81894adf8f707-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,985 INFO [StoreOpener-016b1ee3f2107d7f0bb81894adf8f707-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 016b1ee3f2107d7f0bb81894adf8f707 columnFamilyName info 2024-12-09T01:53:01,985 DEBUG [StoreOpener-016b1ee3f2107d7f0bb81894adf8f707-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:01,985 INFO [StoreOpener-016b1ee3f2107d7f0bb81894adf8f707-1 {}] regionserver.HStore(327): Store=016b1ee3f2107d7f0bb81894adf8f707/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:53:01,985 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,986 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,986 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,987 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,987 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,988 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,990 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:53:01,990 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 016b1ee3f2107d7f0bb81894adf8f707; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=752545, jitterRate=-0.043090760707855225}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T01:53:01,990 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:01,991 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 016b1ee3f2107d7f0bb81894adf8f707: Running coprocessor pre-open hook at 1733709181982Writing region info on filesystem at 1733709181982Initializing all the Stores at 1733709181983 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709181983Cleaning up temporary data from old regions at 1733709181987 (+4 ms)Running coprocessor post-open hooks at 1733709181990 (+3 ms)Region opened successfully at 1733709181991 (+1 ms) 2024-12-09T01:53:01,992 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., pid=6, masterSystemTime=1733709181977 2024-12-09T01:53:01,995 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 
2024-12-09T01:53:01,995 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 2024-12-09T01:53:01,996 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=016b1ee3f2107d7f0bb81894adf8f707, regionState=OPEN, openSeqNum=2, regionLocation=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:01,995 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:01,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806 because future has completed 2024-12-09T01:53:02,002 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T01:53:02,002 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806 in 174 msec 2024-12-09T01:53:02,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T01:53:02,004 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=016b1ee3f2107d7f0bb81894adf8f707, ASSIGN in 333 msec 2024-12-09T01:53:02,005 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T01:53:02,005 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733709182005"}]},"ts":"1733709182005"} 2024-12-09T01:53:02,007 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-12-09T01:53:02,008 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T01:53:02,010 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 364 msec 2024-12-09T01:53:02,176 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:02,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:02,996 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:03,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-12-09T01:53:03,060 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-12-09T01:53:03,060 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-12-09T01:53:03,177 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:03,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:03,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:04,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:04,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:04,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,565 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,580 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:04,997 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:05,088 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:53:05,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,091 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,108 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,109 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,113 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,116 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:05,178 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:05,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:05,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:06,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:06,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:06,998 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:07,059 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T01:53:07,060 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-12-09T01:53:07,179 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:07,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:07,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:08,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:08,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:08,999 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:09,180 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:09,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:10,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:10,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:10,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:11,000 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:11,181 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:11,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:11,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33543 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T01:53:11,685 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-12-09T01:53:11,685 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-12-09T01:53:11,688 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-12-09T01:53:11,688 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 
2024-12-09T01:53:11,690 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=2] 2024-12-09T01:53:11,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:11,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016b1ee3f2107d7f0bb81894adf8f707 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T01:53:11,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/529e772cdde146b795ce413a80891035 is 1080, key is row0001/info:/1733709191691/Put/seqid=0 2024-12-09T01:53:11,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741837_1013 (size=12509) 2024-12-09T01:53:11,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741837_1013 (size=12509) 2024-12-09T01:53:11,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/529e772cdde146b795ce413a80891035 2024-12-09T01:53:11,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-09T01:53:11,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/529e772cdde146b795ce413a80891035 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/529e772cdde146b795ce413a80891035 2024-12-09T01:53:11,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47262 deadline: 1733709201739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:11,747 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/529e772cdde146b795ce413a80891035, entries=7, sequenceid=11, filesize=12.2 K 2024-12-09T01:53:11,748 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 016b1ee3f2107d7f0bb81894adf8f707 in 47ms, sequenceid=11, compaction requested=false 2024-12-09T01:53:11,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016b1ee3f2107d7f0bb81894adf8f707: 2024-12-09T01:53:11,775 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:53:11,776 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:53:11,776 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=2 because the exception is null or not the one we care about 2024-12-09T01:53:12,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:12,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:12,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:13,001 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:13,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:13,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:14,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:14,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:14,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:15,002 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:15,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:15,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:16,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:16,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:16,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:17,003 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:17,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:17,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:18,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:18,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:18,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:19,004 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:19,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:19,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:20,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:20,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:20,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:21,005 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:21,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:21,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:21,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:21,807 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016b1ee3f2107d7f0bb81894adf8f707 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-12-09T01:53:21,813 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/5d34e7a59f9847739ae7af4cdc795165 is 1080, key is row0008/info:/1733709191702/Put/seqid=0 2024-12-09T01:53:21,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741838_1014 (size=29761) 2024-12-09T01:53:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741838_1014 (size=29761) 2024-12-09T01:53:21,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/5d34e7a59f9847739ae7af4cdc795165 2024-12-09T01:53:21,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/5d34e7a59f9847739ae7af4cdc795165 as 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/5d34e7a59f9847739ae7af4cdc795165 2024-12-09T01:53:21,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/5d34e7a59f9847739ae7af4cdc795165, entries=23, sequenceid=37, filesize=29.1 K 2024-12-09T01:53:21,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 016b1ee3f2107d7f0bb81894adf8f707 in 22ms, sequenceid=37, compaction requested=false 2024-12-09T01:53:21,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016b1ee3f2107d7f0bb81894adf8f707: 2024-12-09T01:53:21,830 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-12-09T01:53:21,830 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:21,830 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/5d34e7a59f9847739ae7af4cdc795165 because midkey is the same as first or last row 2024-12-09T01:53:22,006 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:22,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:22,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:23,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:23,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:23,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:23,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:23,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016b1ee3f2107d7f0bb81894adf8f707 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T01:53:23,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/576da9574d1046328c684288869febf8 is 1080, key is row0031/info:/1733709201809/Put/seqid=0 2024-12-09T01:53:23,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741839_1015 (size=12509) 2024-12-09T01:53:23,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741839_1015 (size=12509) 2024-12-09T01:53:23,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/576da9574d1046328c684288869febf8 2024-12-09T01:53:23,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/576da9574d1046328c684288869febf8 as 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/576da9574d1046328c684288869febf8 2024-12-09T01:53:23,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/576da9574d1046328c684288869febf8, entries=7, sequenceid=47, filesize=12.2 K 2024-12-09T01:53:23,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 016b1ee3f2107d7f0bb81894adf8f707 in 24ms, sequenceid=47, compaction requested=true 2024-12-09T01:53:23,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016b1ee3f2107d7f0bb81894adf8f707: 2024-12-09T01:53:23,846 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-12-09T01:53:23,846 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:23,846 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/5d34e7a59f9847739ae7af4cdc795165 because midkey is the same as first or last row 2024-12-09T01:53:23,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 016b1ee3f2107d7f0bb81894adf8f707:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:53:23,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:23,846 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:53:23,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:23,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016b1ee3f2107d7f0bb81894adf8f707 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-09T01:53:23,847 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:53:23,847 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1541): 016b1ee3f2107d7f0bb81894adf8f707/info is initiating minor compaction (all files) 2024-12-09T01:53:23,848 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 016b1ee3f2107d7f0bb81894adf8f707/info in TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 
2024-12-09T01:53:23,848 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/529e772cdde146b795ce413a80891035, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/5d34e7a59f9847739ae7af4cdc795165, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/576da9574d1046328c684288869febf8] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp, totalSize=53.5 K 2024-12-09T01:53:23,848 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 529e772cdde146b795ce413a80891035, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1733709191691 2024-12-09T01:53:23,849 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5d34e7a59f9847739ae7af4cdc795165, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733709191702 2024-12-09T01:53:23,849 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 576da9574d1046328c684288869febf8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733709201809 2024-12-09T01:53:23,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/d9fb0ed8cf224c2a910dc399e8a4ad5f is 1080, key is row0038/info:/1733709203822/Put/seqid=0 2024-12-09T01:53:23,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741840_1016 (size=20064) 2024-12-09T01:53:23,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741840_1016 (size=20064) 2024-12-09T01:53:23,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=64 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/d9fb0ed8cf224c2a910dc399e8a4ad5f 2024-12-09T01:53:23,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/d9fb0ed8cf224c2a910dc399e8a4ad5f as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/d9fb0ed8cf224c2a910dc399e8a4ad5f 2024-12-09T01:53:23,873 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 016b1ee3f2107d7f0bb81894adf8f707#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 
0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:53:23,874 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/8a9cb902e3d14fa58719684c4a978954 is 1080, key is row0001/info:/1733709191691/Put/seqid=0 2024-12-09T01:53:23,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/d9fb0ed8cf224c2a910dc399e8a4ad5f, entries=14, sequenceid=64, filesize=19.6 K 2024-12-09T01:53:23,877 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 016b1ee3f2107d7f0bb81894adf8f707 in 30ms, sequenceid=64, compaction requested=false 2024-12-09T01:53:23,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016b1ee3f2107d7f0bb81894adf8f707: 2024-12-09T01:53:23,877 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=73.1 K, sizeToCheck=16.0 K 2024-12-09T01:53:23,877 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:23,877 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/5d34e7a59f9847739ae7af4cdc795165 because midkey is the same as first or last row 2024-12-09T01:53:23,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741841_1017 (size=44978) 2024-12-09T01:53:23,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741841_1017 (size=44978) 2024-12-09T01:53:23,889 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/8a9cb902e3d14fa58719684c4a978954 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954 2024-12-09T01:53:23,895 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 016b1ee3f2107d7f0bb81894adf8f707/info of 016b1ee3f2107d7f0bb81894adf8f707 into 8a9cb902e3d14fa58719684c4a978954(size=43.9 K), total size for store is 63.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 016b1ee3f2107d7f0bb81894adf8f707: 2024-12-09T01:53:23,895 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., storeName=016b1ee3f2107d7f0bb81894adf8f707/info, priority=13, startTime=1733709203846; duration=0sec 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954 because midkey is the same as first or last row 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954 because midkey is the same as first or last row 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.5 K, sizeToCheck=16.0 K 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954 because midkey is the same as first or last row 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:23,895 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 016b1ee3f2107d7f0bb81894adf8f707:info 2024-12-09T01:53:24,007 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:24,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:24,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:25,008 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:25,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:25,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:25,875 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016b1ee3f2107d7f0bb81894adf8f707 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-09T01:53:25,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/44683f5113d54930b586429012c8e7fd is 1080, key is row0052/info:/1733709203848/Put/seqid=0 2024-12-09T01:53:25,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741842_1018 (size=20064) 2024-12-09T01:53:25,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741842_1018 (size=20064) 2024-12-09T01:53:25,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/44683f5113d54930b586429012c8e7fd 2024-12-09T01:53:25,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/44683f5113d54930b586429012c8e7fd as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/44683f5113d54930b586429012c8e7fd 2024-12-09T01:53:25,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/44683f5113d54930b586429012c8e7fd, entries=14, sequenceid=82, filesize=19.6 K 2024-12-09T01:53:25,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=13.66 KB/13988 for 016b1ee3f2107d7f0bb81894adf8f707 in 25ms, sequenceid=82, compaction requested=true 2024-12-09T01:53:25,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016b1ee3f2107d7f0bb81894adf8f707: 2024-12-09T01:53:25,901 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=83.1 K, sizeToCheck=16.0 K 2024-12-09T01:53:25,901 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:25,901 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954 because midkey is the same as first or last row 2024-12-09T01:53:25,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 016b1ee3f2107d7f0bb81894adf8f707:info, priority=-2147483648, current under compaction 
store size is 1 2024-12-09T01:53:25,901 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:25,901 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:53:25,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:25,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016b1ee3f2107d7f0bb81894adf8f707 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-12-09T01:53:25,902 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:53:25,902 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1541): 016b1ee3f2107d7f0bb81894adf8f707/info is initiating minor compaction (all files) 2024-12-09T01:53:25,903 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 016b1ee3f2107d7f0bb81894adf8f707/info in TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 2024-12-09T01:53:25,903 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/d9fb0ed8cf224c2a910dc399e8a4ad5f, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/44683f5113d54930b586429012c8e7fd] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp, totalSize=83.1 K 2024-12-09T01:53:25,903 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8a9cb902e3d14fa58719684c4a978954, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1733709191691 2024-12-09T01:53:25,903 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting d9fb0ed8cf224c2a910dc399e8a4ad5f, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=64, earliestPutTs=1733709203822 2024-12-09T01:53:25,904 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 44683f5113d54930b586429012c8e7fd, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733709203848 2024-12-09T01:53:25,906 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/1501ce5fc0254636a9b491e198eabf55 is 1080, key is row0066/info:/1733709205877/Put/seqid=0 
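For orientation, the compaction selection above is internally consistent: the three eligible files of 43.9 K, 19.6 K and 19.6 K add up to the reported totalSize=83.1 K, and the 85106 bytes printed by the ExploringCompactionPolicy is the same quantity before rounding (85106 / 1024 ≈ 83.1 K). A trivial check, with one value inferred as noted in the comments (hypothetical helper, not HBase code):

    // Quick consistency check (hypothetical helper, not HBase code). The 44978 and
    // 20064 byte figures come from the addStoredBlock entries for 8a9cb902... and
    // 44683f5113...; the byte size of d9fb0ed8... is inferred from its reported
    // 19.6 K filesize, since its block report is not part of this excerpt.
    final class CompactionSizeCheck {
        public static void main(String[] args) {
            long[] selectedFileBytes = {44978L, 20064L, 20064L};
            long total = 0;
            for (long b : selectedFileBytes) {
                total += b;
            }
            // Prints: total=85106 bytes = 83.1 K, matching the policy's log line.
            System.out.printf("total=%d bytes = %.1f K%n", total, total / 1024.0);
        }
    }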
2024-12-09T01:53:25,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741843_1019 (size=20064) 2024-12-09T01:53:25,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741843_1019 (size=20064) 2024-12-09T01:53:25,912 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=99 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/1501ce5fc0254636a9b491e198eabf55 2024-12-09T01:53:25,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/1501ce5fc0254636a9b491e198eabf55 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/1501ce5fc0254636a9b491e198eabf55 2024-12-09T01:53:25,921 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 016b1ee3f2107d7f0bb81894adf8f707#info#compaction#62 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:53:25,922 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/225030c839dc4db3a3b3e2ee4c255c2b is 1080, key is row0001/info:/1733709191691/Put/seqid=0 2024-12-09T01:53:25,926 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/1501ce5fc0254636a9b491e198eabf55, entries=14, sequenceid=99, filesize=19.6 K 2024-12-09T01:53:25,927 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=11.56 KB/11836 for 016b1ee3f2107d7f0bb81894adf8f707 in 25ms, sequenceid=99, compaction requested=false 2024-12-09T01:53:25,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016b1ee3f2107d7f0bb81894adf8f707: 2024-12-09T01:53:25,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:25,928 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.7 K, sizeToCheck=16.0 K 2024-12-09T01:53:25,928 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:25,928 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954 because midkey is the same as first or last row 2024-12-09T01:53:25,928 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 016b1ee3f2107d7f0bb81894adf8f707 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-09T01:53:25,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/c1d7461714734236a5870e2e1645a5c2 is 1080, key is row0080/info:/1733709205903/Put/seqid=0 2024-12-09T01:53:25,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741844_1020 (size=75378) 2024-12-09T01:53:25,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741844_1020 (size=75378) 2024-12-09T01:53:25,945 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/225030c839dc4db3a3b3e2ee4c255c2b as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/225030c839dc4db3a3b3e2ee4c255c2b 2024-12-09T01:53:25,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741845_1021 (size=17894) 2024-12-09T01:53:25,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741845_1021 (size=17894) 2024-12-09T01:53:25,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/c1d7461714734236a5870e2e1645a5c2 2024-12-09T01:53:25,952 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 016b1ee3f2107d7f0bb81894adf8f707/info of 016b1ee3f2107d7f0bb81894adf8f707 into 225030c839dc4db3a3b3e2ee4c255c2b(size=73.6 K), total size for store is 93.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
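The PressureAwareThroughputController entry above reports an average of 22.23 MB/second against a 50.00 MB/second limit, which is why this compaction "slept 0 time(s)". A minimal sketch of that kind of throttle, assuming a simple running-average scheme (hypothetical class, not the HBase controller):

    // Minimal sketch of a pressure-aware throughput limit (hypothetical class, not
    // the HBase PressureAwareThroughputController): the writer reports bytes as it
    // goes and is put to sleep only when its running average exceeds the limit,
    // which is why the 22.23 MB/s compaction above slept 0 times under a 50 MB/s cap.
    final class ThroughputThrottleSketch {
        private final double limitBytesPerSec;
        private final long startNanos = System.nanoTime();
        private long bytesWritten;

        ThroughputThrottleSketch(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        void control(long justWrittenBytes) throws InterruptedException {
            bytesWritten += justWrittenBytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double avgRate = bytesWritten / Math.max(elapsedSec, 1e-9);
            if (avgRate > limitBytesPerSec) {
                // Sleep just long enough for the average to fall back to the limit.
                long sleepMs = (long) ((bytesWritten / limitBytesPerSec - elapsedSec) * 1000);
                if (sleepMs > 0) {
                    Thread.sleep(sleepMs);
                }
            }
        }
    }

Under this scheme a writer only pauses once its cumulative rate exceeds the limit, consistent with the zero sleep time logged here.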
2024-12-09T01:53:25,952 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 016b1ee3f2107d7f0bb81894adf8f707: 2024-12-09T01:53:25,952 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., storeName=016b1ee3f2107d7f0bb81894adf8f707/info, priority=13, startTime=1733709205901; duration=0sec 2024-12-09T01:53:25,952 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-12-09T01:53:25,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/c1d7461714734236a5870e2e1645a5c2 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/c1d7461714734236a5870e2e1645a5c2 2024-12-09T01:53:25,952 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:25,952 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-12-09T01:53:25,952 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:25,952 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=93.2 K, sizeToCheck=16.0 K 2024-12-09T01:53:25,952 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:25,953 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:25,953 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:25,953 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 016b1ee3f2107d7f0bb81894adf8f707:info 2024-12-09T01:53:25,954 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33543 {}] assignment.AssignmentManager(1363): Split request from ef6f18c58dc9,34803,1733709180806, parent={ENCODED => 016b1ee3f2107d7f0bb81894adf8f707, NAME => 'TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-09T01:53:25,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/c1d7461714734236a5870e2e1645a5c2, entries=12, sequenceid=114, filesize=17.5 K 2024-12-09T01:53:25,959 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for 016b1ee3f2107d7f0bb81894adf8f707 in 30ms, sequenceid=114, compaction requested=true 2024-12-09T01:53:25,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 016b1ee3f2107d7f0bb81894adf8f707: 2024-12-09T01:53:25,959 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=110.7 K, sizeToCheck=16.0 K 2024-12-09T01:53:25,959 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:25,959 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=110.7 K, sizeToCheck=16.0 K 2024-12-09T01:53:25,959 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:25,959 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=110.7 K, sizeToCheck=16.0 K 2024-12-09T01:53:25,959 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-12-09T01:53:25,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-12-09T01:53:25,960 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33543 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:25,964 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33543 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=c6ba1446e1f5c7a309d87608150019a6, daughterB=59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:25,965 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=c6ba1446e1f5c7a309d87608150019a6, daughterB=59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:25,965 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=c6ba1446e1f5c7a309d87608150019a6, daughterB=59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:25,965 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=c6ba1446e1f5c7a309d87608150019a6, daughterB=59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:25,966 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33543 {}] assignment.AssignmentManager(1363): Split request from ef6f18c58dc9,34803,1733709180806, parent={ENCODED => 016b1ee3f2107d7f0bb81894adf8f707, NAME => 
'TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-12-09T01:53:25,967 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33543 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:25,968 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33543 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=37adab5961fcb6e563627cd4fbfd129d, daughterB=2dbf60516aca990459dba4b32dc73b3c 2024-12-09T01:53:25,968 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=37adab5961fcb6e563627cd4fbfd129d, daughterB=2dbf60516aca990459dba4b32dc73b3c held by pid=7 2024-12-09T01:53:25,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=016b1ee3f2107d7f0bb81894adf8f707, UNASSIGN}] 2024-12-09T01:53:25,976 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-12-09T01:53:25,976 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=016b1ee3f2107d7f0bb81894adf8f707, UNASSIGN 2024-12-09T01:53:25,976 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=37adab5961fcb6e563627cd4fbfd129d, daughterB=2dbf60516aca990459dba4b32dc73b3c 2024-12-09T01:53:25,978 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=016b1ee3f2107d7f0bb81894adf8f707, regionState=CLOSING, regionLocation=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:25,980 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=016b1ee3f2107d7f0bb81894adf8f707, UNASSIGN because future has completed 2024-12-09T01:53:25,980 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-09T01:53:25,980 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806}] 2024-12-09T01:53:26,008 WARN 
[Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:26,137 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,138 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-09T01:53:26,138 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 016b1ee3f2107d7f0bb81894adf8f707, disabling compactions & flushes 2024-12-09T01:53:26,138 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 2024-12-09T01:53:26,138 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 
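Note on the recurring "Failed invocation" WARNs and their stack traces (01:53:24 through 01:53:26 above): the lease-recovery helper calls isFileClosed reflectively, and every probe fails with a wrapped "java.io.IOException: Filesystem closed", apparently because the DFSClient for the filesystem holding those WALs (localhost:42375) has already been shut down, so the retries can never succeed. A minimal sketch of that probe, assuming only what the traces show; it is illustrative, not the actual RecoverLeaseFSUtils code:

    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative fragment (hypothetical class, not RecoverLeaseFSUtils): look up
    // isFileClosed reflectively, since some FileSystem implementations lack it, and
    // treat any failure, here the wrapped "Filesystem closed" IOException, as
    // "not closed yet", so the caller keeps retrying and keeps logging the WARN.
    final class IsFileClosedProbeSketch {
        static boolean isFileClosed(FileSystem fs, Path wal) {
            try {
                Method probe = fs.getClass().getMethod("isFileClosed", Path.class);
                return (Boolean) probe.invoke(fs, wal);
            } catch (NoSuchMethodException e) {
                return false; // no probe available on this FileSystem type
            } catch (IllegalAccessException | InvocationTargetException e) {
                System.err.println("Failed invocation for " + wal + ": " + e);
                return false; // caller retries after a backoff
            }
        }
    }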
2024-12-09T01:53:26,138 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. after waiting 0 ms 2024-12-09T01:53:26,138 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 2024-12-09T01:53:26,138 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing 016b1ee3f2107d7f0bb81894adf8f707 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-12-09T01:53:26,143 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/b23851a53c7e4c699e50179884d33738 is 1080, key is row0092/info:/1733709205929/Put/seqid=0 2024-12-09T01:53:26,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741846_1022 (size=10347) 2024-12-09T01:53:26,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741846_1022 (size=10347) 2024-12-09T01:53:26,148 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=123 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/b23851a53c7e4c699e50179884d33738 2024-12-09T01:53:26,154 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/.tmp/info/b23851a53c7e4c699e50179884d33738 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/b23851a53c7e4c699e50179884d33738 2024-12-09T01:53:26,159 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/b23851a53c7e4c699e50179884d33738, entries=5, sequenceid=123, filesize=10.1 K 2024-12-09T01:53:26,160 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 016b1ee3f2107d7f0bb81894adf8f707 in 22ms, sequenceid=123, compaction requested=true 2024-12-09T01:53:26,161 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/529e772cdde146b795ce413a80891035, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/5d34e7a59f9847739ae7af4cdc795165, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/576da9574d1046328c684288869febf8, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/d9fb0ed8cf224c2a910dc399e8a4ad5f, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/44683f5113d54930b586429012c8e7fd] to archive 2024-12-09T01:53:26,161 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T01:53:26,163 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/529e772cdde146b795ce413a80891035 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/529e772cdde146b795ce413a80891035 2024-12-09T01:53:26,164 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/5d34e7a59f9847739ae7af4cdc795165 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/5d34e7a59f9847739ae7af4cdc795165 2024-12-09T01:53:26,165 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/8a9cb902e3d14fa58719684c4a978954 2024-12-09T01:53:26,167 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/576da9574d1046328c684288869febf8 to 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/576da9574d1046328c684288869febf8 2024-12-09T01:53:26,168 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/d9fb0ed8cf224c2a910dc399e8a4ad5f to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/d9fb0ed8cf224c2a910dc399e8a4ad5f 2024-12-09T01:53:26,169 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/44683f5113d54930b586429012c8e7fd to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/44683f5113d54930b586429012c8e7fd 2024-12-09T01:53:26,175 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=1 2024-12-09T01:53:26,176 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 2024-12-09T01:53:26,176 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for 016b1ee3f2107d7f0bb81894adf8f707: Waiting for close lock at 1733709206138Running coprocessor pre-close hooks at 1733709206138Disabling compacts and flushes for region at 1733709206138Disabling writes for close at 1733709206138Obtaining lock to block concurrent updates at 1733709206138Preparing flush snapshotting stores in 016b1ee3f2107d7f0bb81894adf8f707 at 1733709206138Finished memstore snapshotting TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., syncing WAL and waiting on mvcc, flushsize=dataSize=5380, getHeapSize=6000, getOffHeapSize=0, getCellsCount=5 at 1733709206139 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 
at 1733709206139Flushing 016b1ee3f2107d7f0bb81894adf8f707/info: creating writer at 1733709206139Flushing 016b1ee3f2107d7f0bb81894adf8f707/info: appending metadata at 1733709206142 (+3 ms)Flushing 016b1ee3f2107d7f0bb81894adf8f707/info: closing flushed file at 1733709206142Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f17ef69: reopening flushed file at 1733709206153 (+11 ms)Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 016b1ee3f2107d7f0bb81894adf8f707 in 22ms, sequenceid=123, compaction requested=true at 1733709206160 (+7 ms)Writing region close event to WAL at 1733709206171 (+11 ms)Running coprocessor post-close hooks at 1733709206175 (+4 ms)Closed at 1733709206175 2024-12-09T01:53:26,178 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,178 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=016b1ee3f2107d7f0bb81894adf8f707, regionState=CLOSED 2024-12-09T01:53:26,180 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806 because future has completed 2024-12-09T01:53:26,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-09T01:53:26,183 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure 016b1ee3f2107d7f0bb81894adf8f707, server=ef6f18c58dc9,34803,1733709180806 in 201 msec 2024-12-09T01:53:26,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-12-09T01:53:26,185 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=016b1ee3f2107d7f0bb81894adf8f707, UNASSIGN in 211 msec 2024-12-09T01:53:26,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:26,193 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:26,197 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=016b1ee3f2107d7f0bb81894adf8f707, threads=4 2024-12-09T01:53:26,200 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/c1d7461714734236a5870e2e1645a5c2 for region: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,200 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/b23851a53c7e4c699e50179884d33738 for region: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,200 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/225030c839dc4db3a3b3e2ee4c255c2b for region: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,200 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/1501ce5fc0254636a9b491e198eabf55 for region: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,213 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/c1d7461714734236a5870e2e1645a5c2, top=true 2024-12-09T01:53:26,214 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/b23851a53c7e4c699e50179884d33738, top=true 2024-12-09T01:53:26,219 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/1501ce5fc0254636a9b491e198eabf55, top=true 2024-12-09T01:53:26,220 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-c1d7461714734236a5870e2e1645a5c2 for child: 59d8a49309daf32c4dda194bc963c4f3, parent: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,220 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/c1d7461714734236a5870e2e1645a5c2 for region: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,224 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-b23851a53c7e4c699e50179884d33738 for child: 59d8a49309daf32c4dda194bc963c4f3, parent: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,224 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/b23851a53c7e4c699e50179884d33738 for region: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741847_1023 (size=27) 2024-12-09T01:53:26,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741847_1023 (size=27) 2024-12-09T01:53:26,229 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-1501ce5fc0254636a9b491e198eabf55 for child: 59d8a49309daf32c4dda194bc963c4f3, parent: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,229 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/1501ce5fc0254636a9b491e198eabf55 for region: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42201 is added to blk_1073741848_1024 (size=27) 2024-12-09T01:53:26,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741848_1024 (size=27) 2024-12-09T01:53:26,241 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/225030c839dc4db3a3b3e2ee4c255c2b for region: 016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:53:26,243 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 016b1ee3f2107d7f0bb81894adf8f707 Daughter A: [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707] storefiles, Daughter B: [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-1501ce5fc0254636a9b491e198eabf55, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-b23851a53c7e4c699e50179884d33738, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-c1d7461714734236a5870e2e1645a5c2] storefiles. 2024-12-09T01:53:26,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:26,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741849_1025 (size=71) 2024-12-09T01:53:26,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741849_1025 (size=71) 2024-12-09T01:53:26,254 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:26,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741850_1026 (size=71) 2024-12-09T01:53:26,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741850_1026 (size=71) 2024-12-09T01:53:26,267 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:26,277 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-09T01:53:26,279 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/recovered.edits/126.seqid, newMaxSeqId=126, maxSeqId=-1 2024-12-09T01:53:26,281 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1733709206281"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1733709206281"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1733709206281"}]},"ts":"1733709206281"} 2024-12-09T01:53:26,282 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733709206281"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733709206281"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733709206281"}]},"ts":"1733709206281"} 2024-12-09T01:53:26,282 DEBUG [PEWorker-5 {}] 
assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1733709206281"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733709206281"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733709206281"}]},"ts":"1733709206281"} 2024-12-09T01:53:26,300 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c6ba1446e1f5c7a309d87608150019a6, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59d8a49309daf32c4dda194bc963c4f3, ASSIGN}] 2024-12-09T01:53:26,301 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c6ba1446e1f5c7a309d87608150019a6, ASSIGN 2024-12-09T01:53:26,301 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59d8a49309daf32c4dda194bc963c4f3, ASSIGN 2024-12-09T01:53:26,302 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c6ba1446e1f5c7a309d87608150019a6, ASSIGN; state=SPLITTING_NEW, location=ef6f18c58dc9,34803,1733709180806; forceNewPlan=false, retain=false 2024-12-09T01:53:26,302 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59d8a49309daf32c4dda194bc963c4f3, ASSIGN; state=SPLITTING_NEW, location=ef6f18c58dc9,34803,1733709180806; forceNewPlan=false, retain=false 2024-12-09T01:53:26,453 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c6ba1446e1f5c7a309d87608150019a6, regionState=OPENING, regionLocation=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:26,453 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=59d8a49309daf32c4dda194bc963c4f3, regionState=OPENING, regionLocation=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:26,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59d8a49309daf32c4dda194bc963c4f3, ASSIGN because future has completed 2024-12-09T01:53:26,456 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806}] 2024-12-09T01:53:26,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, 
ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c6ba1446e1f5c7a309d87608150019a6, ASSIGN because future has completed 2024-12-09T01:53:26,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6ba1446e1f5c7a309d87608150019a6, server=ef6f18c58dc9,34803,1733709180806}] 2024-12-09T01:53:26,612 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:53:26,612 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 59d8a49309daf32c4dda194bc963c4f3, NAME => 'TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.', STARTKEY => 'row0062', ENDKEY => ''} 2024-12-09T01:53:26,612 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,612 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:53:26,612 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,612 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,614 INFO [StoreOpener-59d8a49309daf32c4dda194bc963c4f3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,615 INFO [StoreOpener-59d8a49309daf32c4dda194bc963c4f3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59d8a49309daf32c4dda194bc963c4f3 columnFamilyName info 2024-12-09T01:53:26,615 DEBUG [StoreOpener-59d8a49309daf32c4dda194bc963c4f3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:26,627 DEBUG 
[StoreOpener-59d8a49309daf32c4dda194bc963c4f3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707->hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/225030c839dc4db3a3b3e2ee4c255c2b-top 2024-12-09T01:53:26,632 DEBUG [StoreOpener-59d8a49309daf32c4dda194bc963c4f3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-1501ce5fc0254636a9b491e198eabf55 2024-12-09T01:53:26,636 DEBUG [StoreOpener-59d8a49309daf32c4dda194bc963c4f3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-b23851a53c7e4c699e50179884d33738 2024-12-09T01:53:26,640 DEBUG [StoreOpener-59d8a49309daf32c4dda194bc963c4f3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-c1d7461714734236a5870e2e1645a5c2 2024-12-09T01:53:26,640 INFO [StoreOpener-59d8a49309daf32c4dda194bc963c4f3-1 {}] regionserver.HStore(327): Store=59d8a49309daf32c4dda194bc963c4f3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:53:26,640 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,641 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,642 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,642 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,642 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,644 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,645 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] 
regionserver.HRegion(1114): Opened 59d8a49309daf32c4dda194bc963c4f3; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829642, jitterRate=0.0549444854259491}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T01:53:26,645 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:26,645 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 59d8a49309daf32c4dda194bc963c4f3: Running coprocessor pre-open hook at 1733709206612Writing region info on filesystem at 1733709206613 (+1 ms)Initializing all the Stores at 1733709206613Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709206613Cleaning up temporary data from old regions at 1733709206642 (+29 ms)Running coprocessor post-open hooks at 1733709206645 (+3 ms)Region opened successfully at 1733709206645 2024-12-09T01:53:26,646 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., pid=13, masterSystemTime=1733709206608 2024-12-09T01:53:26,646 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 59d8a49309daf32c4dda194bc963c4f3:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:53:26,647 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:26,647 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-09T01:53:26,648 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:53:26,648 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1541): 59d8a49309daf32c4dda194bc963c4f3/info is initiating minor compaction (all files) 2024-12-09T01:53:26,648 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 59d8a49309daf32c4dda194bc963c4f3/info in TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 
2024-12-09T01:53:26,649 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707->hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/225030c839dc4db3a3b3e2ee4c255c2b-top, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-1501ce5fc0254636a9b491e198eabf55, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-c1d7461714734236a5870e2e1645a5c2, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-b23851a53c7e4c699e50179884d33738] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp, totalSize=120.8 K 2024-12-09T01:53:26,649 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:53:26,649 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:53:26,649 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 
2024-12-09T01:53:26,649 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => c6ba1446e1f5c7a309d87608150019a6, NAME => 'TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6.', STARTKEY => '', ENDKEY => 'row0062'} 2024-12-09T01:53:26,649 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=83, earliestPutTs=1733709191691 2024-12-09T01:53:26,650 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,650 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:53:26,650 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,650 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,650 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=59d8a49309daf32c4dda194bc963c4f3, regionState=OPEN, openSeqNum=127, regionLocation=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:26,651 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-1501ce5fc0254636a9b491e198eabf55, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=99, earliestPutTs=1733709205877 2024-12-09T01:53:26,651 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-c1d7461714734236a5870e2e1645a5c2, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733709205903 2024-12-09T01:53:26,652 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-b23851a53c7e4c699e50179884d33738, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733709205929 2024-12-09T01:53:26,652 INFO [StoreOpener-c6ba1446e1f5c7a309d87608150019a6-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,652 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-12-09T01:53:26,652 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-12-09T01:53:26,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-12-09T01:53:26,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 because future has completed 2024-12-09T01:53:26,653 INFO [StoreOpener-c6ba1446e1f5c7a309d87608150019a6-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c6ba1446e1f5c7a309d87608150019a6 columnFamilyName info 2024-12-09T01:53:26,653 DEBUG [StoreOpener-c6ba1446e1f5c7a309d87608150019a6-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:53:26,657 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-12-09T01:53:26,657 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure 59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 in 198 msec 2024-12-09T01:53:26,658 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=59d8a49309daf32c4dda194bc963c4f3, ASSIGN in 357 msec 2024-12-09T01:53:26,670 DEBUG [StoreOpener-c6ba1446e1f5c7a309d87608150019a6-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707->hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/225030c839dc4db3a3b3e2ee4c255c2b-bottom 2024-12-09T01:53:26,671 INFO [StoreOpener-c6ba1446e1f5c7a309d87608150019a6-1 {}] regionserver.HStore(327): Store=c6ba1446e1f5c7a309d87608150019a6/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:53:26,671 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,672 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,672 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/info/1556a4a26d9d4ef29b709e55844e17f3 is 193, key is TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3./info:regioninfo/1733709206650/Put/seqid=0 2024-12-09T01:53:26,674 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,675 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,675 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,677 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,678 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened c6ba1446e1f5c7a309d87608150019a6; next sequenceid=127; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862175, jitterRate=0.09631332755088806}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T01:53:26,678 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:53:26,678 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for c6ba1446e1f5c7a309d87608150019a6: Running coprocessor pre-open hook at 1733709206650Writing region info on filesystem at 1733709206650Initializing all the Stores at 1733709206651 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709206651Cleaning up temporary data from old regions at 1733709206675 (+24 ms)Running coprocessor post-open hooks at 1733709206678 (+3 ms)Region opened successfully at 1733709206678 2024-12-09T01:53:26,680 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6., pid=14, masterSystemTime=1733709206608 2024-12-09T01:53:26,680 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store c6ba1446e1f5c7a309d87608150019a6:info, priority=-2147483648, current under compaction store size is 2 2024-12-09T01:53:26,680 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] 
regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:26,680 DEBUG [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-12-09T01:53:26,681 INFO [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 2024-12-09T01:53:26,681 DEBUG [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.HStore(1541): c6ba1446e1f5c7a309d87608150019a6/info is initiating minor compaction (all files) 2024-12-09T01:53:26,681 INFO [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c6ba1446e1f5c7a309d87608150019a6/info in TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 2024-12-09T01:53:26,681 INFO [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707->hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/225030c839dc4db3a3b3e2ee4c255c2b-bottom] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/.tmp, totalSize=73.6 K 2024-12-09T01:53:26,682 DEBUG [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] compactions.Compactor(225): Compacting 225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1733709191691 2024-12-09T01:53:26,682 DEBUG [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 2024-12-09T01:53:26,682 INFO [RS_OPEN_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 
2024-12-09T01:53:26,683 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c6ba1446e1f5c7a309d87608150019a6, regionState=OPEN, openSeqNum=127, regionLocation=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:26,686 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c6ba1446e1f5c7a309d87608150019a6, server=ef6f18c58dc9,34803,1733709180806 because future has completed 2024-12-09T01:53:26,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741851_1027 (size=9882) 2024-12-09T01:53:26,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741851_1027 (size=9882) 2024-12-09T01:53:26,688 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/info/1556a4a26d9d4ef29b709e55844e17f3 2024-12-09T01:53:26,692 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59d8a49309daf32c4dda194bc963c4f3#info#compaction#66 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:53:26,693 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/26a24ff9c02a4f4fba879844622ef9b3 is 1080, key is row0062/info:/1733709203868/Put/seqid=0 2024-12-09T01:53:26,698 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-12-09T01:53:26,698 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure c6ba1446e1f5c7a309d87608150019a6, server=ef6f18c58dc9,34803,1733709180806 in 231 msec 2024-12-09T01:53:26,701 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-12-09T01:53:26,701 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c6ba1446e1f5c7a309d87608150019a6, ASSIGN in 398 msec 2024-12-09T01:53:26,703 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=c6ba1446e1f5c7a309d87608150019a6, daughterB=59d8a49309daf32c4dda194bc963c4f3 in 741 msec 2024-12-09T01:53:26,703 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=37adab5961fcb6e563627cd4fbfd129d, daughterB=2dbf60516aca990459dba4b32dc73b3c 2024-12-09T01:53:26,704 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, 
hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=37adab5961fcb6e563627cd4fbfd129d, daughterB=2dbf60516aca990459dba4b32dc73b3c 2024-12-09T01:53:26,704 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=37adab5961fcb6e563627cd4fbfd129d, daughterB=2dbf60516aca990459dba4b32dc73b3c 2024-12-09T01:53:26,705 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => 016b1ee3f2107d7f0bb81894adf8f707, NAME => 'TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-12-09T01:53:26,706 INFO [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c6ba1446e1f5c7a309d87608150019a6#info#compaction#67 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:53:26,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=016b1ee3f2107d7f0bb81894adf8f707, daughterA=37adab5961fcb6e563627cd4fbfd129d, daughterB=2dbf60516aca990459dba4b32dc73b3c in 738 msec 2024-12-09T01:53:26,707 DEBUG [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/.tmp/info/473828ea63394f37b2484faaa00447ef is 1080, key is row0001/info:/1733709191691/Put/seqid=0 2024-12-09T01:53:26,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741852_1028 (size=43081) 2024-12-09T01:53:26,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741852_1028 (size=43081) 2024-12-09T01:53:26,720 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/26a24ff9c02a4f4fba879844622ef9b3 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/26a24ff9c02a4f4fba879844622ef9b3 2024-12-09T01:53:26,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/ns/6e25031c8e25444382413db3e591c2c9 is 43, key is default/ns:d/1733709181616/Put/seqid=0 2024-12-09T01:53:26,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741853_1029 (size=70862) 2024-12-09T01:53:26,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741853_1029 (size=70862) 2024-12-09T01:53:26,727 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1337): 
Completed compaction of 4 (all) file(s) in 59d8a49309daf32c4dda194bc963c4f3/info of 59d8a49309daf32c4dda194bc963c4f3 into 26a24ff9c02a4f4fba879844622ef9b3(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T01:53:26,727 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:26,727 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., storeName=59d8a49309daf32c4dda194bc963c4f3/info, priority=12, startTime=1733709206646; duration=0sec 2024-12-09T01:53:26,727 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:26,727 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59d8a49309daf32c4dda194bc963c4f3:info 2024-12-09T01:53:26,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741854_1030 (size=5153) 2024-12-09T01:53:26,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741854_1030 (size=5153) 2024-12-09T01:53:26,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/ns/6e25031c8e25444382413db3e591c2c9 2024-12-09T01:53:26,732 DEBUG [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/.tmp/info/473828ea63394f37b2484faaa00447ef as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/info/473828ea63394f37b2484faaa00447ef 2024-12-09T01:53:26,738 INFO [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in c6ba1446e1f5c7a309d87608150019a6/info of c6ba1446e1f5c7a309d87608150019a6 into 473828ea63394f37b2484faaa00447ef(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T01:53:26,738 DEBUG [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c6ba1446e1f5c7a309d87608150019a6: 2024-12-09T01:53:26,738 INFO [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6., storeName=c6ba1446e1f5c7a309d87608150019a6/info, priority=15, startTime=1733709206680; duration=0sec 2024-12-09T01:53:26,738 DEBUG [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:26,738 DEBUG [RS:0;ef6f18c58dc9:34803-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c6ba1446e1f5c7a309d87608150019a6:info 2024-12-09T01:53:26,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/table/9b2a5724cb59461a98bee0a834153305 is 65, key is TestLogRolling-testLogRolling/table:state/1733709182005/Put/seqid=0 2024-12-09T01:53:26,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741855_1031 (size=5340) 2024-12-09T01:53:26,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741855_1031 (size=5340) 2024-12-09T01:53:26,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/table/9b2a5724cb59461a98bee0a834153305 2024-12-09T01:53:26,769 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/info/1556a4a26d9d4ef29b709e55844e17f3 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/info/1556a4a26d9d4ef29b709e55844e17f3 2024-12-09T01:53:26,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/info/1556a4a26d9d4ef29b709e55844e17f3, entries=30, sequenceid=17, filesize=9.7 K 2024-12-09T01:53:26,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/ns/6e25031c8e25444382413db3e591c2c9 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/ns/6e25031c8e25444382413db3e591c2c9 2024-12-09T01:53:26,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/ns/6e25031c8e25444382413db3e591c2c9, entries=2, sequenceid=17, filesize=5.0 K 2024-12-09T01:53:26,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/table/9b2a5724cb59461a98bee0a834153305 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/table/9b2a5724cb59461a98bee0a834153305 2024-12-09T01:53:26,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/table/9b2a5724cb59461a98bee0a834153305, entries=2, sequenceid=17, filesize=5.2 K 2024-12-09T01:53:26,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 135ms, sequenceid=17, compaction requested=false 2024-12-09T01:53:26,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-12-09T01:53:27,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:27,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:27,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47262 deadline: 1733709217940, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. is not online on ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:27,941 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. is not online on ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:53:27,941 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707. 
is not online on ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:53:27,941 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1733709181644.016b1ee3f2107d7f0bb81894adf8f707., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=2 from cache 2024-12-09T01:53:28,009 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:28,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:28,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:29,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:29,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:29,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:30,010 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:30,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:30,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:30,745 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T01:53:31,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:31,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,176 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,177 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:31,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,195 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,196 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,199 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,200 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:31,708 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:53:31,708 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,709 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,710 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,731 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,732 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,735 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:31,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:53:32,011 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:32,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:32,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:33,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:33,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:33,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:34,012 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:34,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:34,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:35,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:35,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:35,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:36,013 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:36,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:36,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:37,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:37,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:37,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:38,011 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127] 2024-12-09T01:53:38,014 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:38,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:38,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T01:53:38,028 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/4a5694bea3954b81960c8157d9852bab is 1080, key is row0097/info:/1733709218012/Put/seqid=0 2024-12-09T01:53:38,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741856_1032 (size=12516) 2024-12-09T01:53:38,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741856_1032 (size=12516) 2024-12-09T01:53:38,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/4a5694bea3954b81960c8157d9852bab 2024-12-09T01:53:38,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/4a5694bea3954b81960c8157d9852bab as 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4a5694bea3954b81960c8157d9852bab 2024-12-09T01:53:38,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4a5694bea3954b81960c8157d9852bab, entries=7, sequenceid=137, filesize=12.2 K 2024-12-09T01:53:38,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 59d8a49309daf32c4dda194bc963c4f3 in 26ms, sequenceid=137, compaction requested=false 2024-12-09T01:53:38,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:38,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:38,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-12-09T01:53:38,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/73fe4ae2a8fa4cdcbd4af6a5b3af6086 is 1080, key is row0104/info:/1733709218024/Put/seqid=0 2024-12-09T01:53:38,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741857_1033 (size=22238) 2024-12-09T01:53:38,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741857_1033 (size=22238) 2024-12-09T01:53:38,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/73fe4ae2a8fa4cdcbd4af6a5b3af6086 2024-12-09T01:53:38,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/73fe4ae2a8fa4cdcbd4af6a5b3af6086 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/73fe4ae2a8fa4cdcbd4af6a5b3af6086 2024-12-09T01:53:38,072 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/73fe4ae2a8fa4cdcbd4af6a5b3af6086, entries=16, sequenceid=156, filesize=21.7 K 2024-12-09T01:53:38,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=9.46 KB/9684 for 59d8a49309daf32c4dda194bc963c4f3 in 22ms, sequenceid=156, compaction requested=true 2024-12-09T01:53:38,073 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:38,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59d8a49309daf32c4dda194bc963c4f3:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:53:38,073 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:38,073 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:53:38,074 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 77835 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:53:38,074 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1541): 59d8a49309daf32c4dda194bc963c4f3/info is initiating minor compaction (all files) 2024-12-09T01:53:38,074 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 59d8a49309daf32c4dda194bc963c4f3/info in TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:53:38,074 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/26a24ff9c02a4f4fba879844622ef9b3, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4a5694bea3954b81960c8157d9852bab, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/73fe4ae2a8fa4cdcbd4af6a5b3af6086] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp, totalSize=76.0 K 2024-12-09T01:53:38,075 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 26a24ff9c02a4f4fba879844622ef9b3, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=123, earliestPutTs=1733709203868 2024-12-09T01:53:38,075 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4a5694bea3954b81960c8157d9852bab, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733709218012 2024-12-09T01:53:38,075 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 73fe4ae2a8fa4cdcbd4af6a5b3af6086, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733709218024 2024-12-09T01:53:38,086 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59d8a49309daf32c4dda194bc963c4f3#info#compaction#72 average throughput is 59.52 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:53:38,087 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/7e2a73c6aab343a1a21007395d8dca85 is 1080, key is row0062/info:/1733709203868/Put/seqid=0 2024-12-09T01:53:38,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741858_1034 (size=68045) 2024-12-09T01:53:38,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741858_1034 (size=68045) 2024-12-09T01:53:38,100 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/7e2a73c6aab343a1a21007395d8dca85 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/7e2a73c6aab343a1a21007395d8dca85 2024-12-09T01:53:38,106 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 59d8a49309daf32c4dda194bc963c4f3/info of 59d8a49309daf32c4dda194bc963c4f3 into 7e2a73c6aab343a1a21007395d8dca85(size=66.5 K), total size for store is 66.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T01:53:38,107 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:38,107 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., storeName=59d8a49309daf32c4dda194bc963c4f3/info, priority=13, startTime=1733709218073; duration=0sec 2024-12-09T01:53:38,107 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:38,107 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59d8a49309daf32c4dda194bc963c4f3:info 2024-12-09T01:53:38,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:38,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:39,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:39,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:39,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:40,015 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:40,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:40,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-12-09T01:53:40,074 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/a7e624fffb784e43b7e4c6a4a516f6b0 is 1080, key is row0120/info:/1733709218051/Put/seqid=0 2024-12-09T01:53:40,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741859_1035 (size=15750) 2024-12-09T01:53:40,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741859_1035 (size=15750) 2024-12-09T01:53:40,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/a7e624fffb784e43b7e4c6a4a516f6b0 2024-12-09T01:53:40,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/a7e624fffb784e43b7e4c6a4a516f6b0 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/a7e624fffb784e43b7e4c6a4a516f6b0 2024-12-09T01:53:40,094 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/a7e624fffb784e43b7e4c6a4a516f6b0, entries=10, sequenceid=170, filesize=15.4 K 2024-12-09T01:53:40,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=16.81 KB/17216 for 59d8a49309daf32c4dda194bc963c4f3 in 26ms, sequenceid=170, compaction requested=false 2024-12-09T01:53:40,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:40,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:40,096 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-12-09T01:53:40,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/4b4f8108f9884643a97244b533ac97ba is 1080, key is row0130/info:/1733709220070/Put/seqid=0 2024-12-09T01:53:40,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is 
added to blk_1073741860_1036 (size=23316) 2024-12-09T01:53:40,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741860_1036 (size=23316) 2024-12-09T01:53:40,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=190 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/4b4f8108f9884643a97244b533ac97ba 2024-12-09T01:53:40,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/4b4f8108f9884643a97244b533ac97ba as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4b4f8108f9884643a97244b533ac97ba 2024-12-09T01:53:40,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-09T01:53:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47262 deadline: 1733709230117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:40,118 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4b4f8108f9884643a97244b533ac97ba, entries=17, sequenceid=190, filesize=22.8 K 2024-12-09T01:53:40,118 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:53:40,118 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:53:40,118 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127 because the exception is null or not the one we care about 2024-12-09T01:53:40,119 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=12.61 KB/12912 for 59d8a49309daf32c4dda194bc963c4f3 in 23ms, sequenceid=190, compaction requested=true 2024-12-09T01:53:40,119 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:40,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59d8a49309daf32c4dda194bc963c4f3:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:53:40,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:40,119 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:53:40,121 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 107111 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:53:40,121 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1541): 59d8a49309daf32c4dda194bc963c4f3/info is initiating minor compaction (all files) 2024-12-09T01:53:40,121 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 59d8a49309daf32c4dda194bc963c4f3/info in TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 
2024-12-09T01:53:40,121 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/7e2a73c6aab343a1a21007395d8dca85, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/a7e624fffb784e43b7e4c6a4a516f6b0, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4b4f8108f9884643a97244b533ac97ba] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp, totalSize=104.6 K 2024-12-09T01:53:40,121 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e2a73c6aab343a1a21007395d8dca85, keycount=58, bloomtype=ROW, size=66.5 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733709203868 2024-12-09T01:53:40,122 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7e624fffb784e43b7e4c6a4a516f6b0, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733709218051 2024-12-09T01:53:40,122 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4b4f8108f9884643a97244b533ac97ba, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733709220070 2024-12-09T01:53:40,133 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59d8a49309daf32c4dda194bc963c4f3#info#compaction#75 average throughput is 43.61 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:53:40,134 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/100e8268ff4848b6add70e86adfb950b is 1080, key is row0062/info:/1733709203868/Put/seqid=0 2024-12-09T01:53:40,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741861_1037 (size=97330) 2024-12-09T01:53:40,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741861_1037 (size=97330) 2024-12-09T01:53:40,148 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/100e8268ff4848b6add70e86adfb950b as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/100e8268ff4848b6add70e86adfb950b 2024-12-09T01:53:40,154 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 59d8a49309daf32c4dda194bc963c4f3/info of 59d8a49309daf32c4dda194bc963c4f3 into 100e8268ff4848b6add70e86adfb950b(size=95.0 K), total size for store is 95.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T01:53:40,154 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:40,154 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., storeName=59d8a49309daf32c4dda194bc963c4f3/info, priority=13, startTime=1733709220119; duration=0sec 2024-12-09T01:53:40,154 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:40,154 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59d8a49309daf32c4dda194bc963c4f3:info 2024-12-09T01:53:40,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:40,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:41,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:41,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:41,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:41,698 INFO [master/ef6f18c58dc9:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-09T01:53:41,698 INFO [master/ef6f18c58dc9:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-09T01:53:42,016 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:42,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:42,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:43,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:43,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:43,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:44,017 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:44,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:44,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:45,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:45,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:45,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:46,018 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:46,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:46,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:46,586 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20375 2024-12-09T01:53:47,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:47,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:47,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:48,019 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:48,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:48,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:49,020 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:49,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:49,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:50,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:50,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:50,156 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-12-09T01:53:50,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/b34eabc108cf4006ae5f82a3798a4223 is 1080, key is row0147/info:/1733709220097/Put/seqid=0 2024-12-09T01:53:50,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741862_1038 (size=19000) 2024-12-09T01:53:50,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741862_1038 (size=19000) 2024-12-09T01:53:50,166 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/b34eabc108cf4006ae5f82a3798a4223 2024-12-09T01:53:50,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/b34eabc108cf4006ae5f82a3798a4223 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b34eabc108cf4006ae5f82a3798a4223 2024-12-09T01:53:50,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b34eabc108cf4006ae5f82a3798a4223, entries=13, sequenceid=207, filesize=18.6 K 2024-12-09T01:53:50,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for 59d8a49309daf32c4dda194bc963c4f3 in 21ms, sequenceid=207, compaction requested=false 2024-12-09T01:53:50,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:50,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:50,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:51,021 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:51,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:51,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:52,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:52,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:52,169 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-12-09T01:53:52,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/65581665fff04b2d9b5d19ef9c822dfb is 1080, key is row0160/info:/1733709230157/Put/seqid=0 2024-12-09T01:53:52,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741863_1039 (size=12516) 2024-12-09T01:53:52,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741863_1039 (size=12516) 2024-12-09T01:53:52,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/65581665fff04b2d9b5d19ef9c822dfb 2024-12-09T01:53:52,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/65581665fff04b2d9b5d19ef9c822dfb as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/65581665fff04b2d9b5d19ef9c822dfb 2024-12-09T01:53:52,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/65581665fff04b2d9b5d19ef9c822dfb, entries=7, sequenceid=217, filesize=12.2 K 2024-12-09T01:53:52,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=14.71 KB/15064 for 59d8a49309daf32c4dda194bc963c4f3 in 23ms, sequenceid=217, compaction requested=true 2024-12-09T01:53:52,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:52,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59d8a49309daf32c4dda194bc963c4f3:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:53:52,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:52,192 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:53:52,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:52,193 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-12-09T01:53:52,194 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 128846 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:53:52,194 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1541): 59d8a49309daf32c4dda194bc963c4f3/info is initiating minor compaction (all files) 2024-12-09T01:53:52,194 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 59d8a49309daf32c4dda194bc963c4f3/info in TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:53:52,194 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/100e8268ff4848b6add70e86adfb950b, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b34eabc108cf4006ae5f82a3798a4223, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/65581665fff04b2d9b5d19ef9c822dfb] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp, totalSize=125.8 K 2024-12-09T01:53:52,194 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 100e8268ff4848b6add70e86adfb950b, keycount=85, bloomtype=ROW, size=95.0 K, encoding=NONE, compression=NONE, seqNum=190, earliestPutTs=1733709203868 2024-12-09T01:53:52,195 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting b34eabc108cf4006ae5f82a3798a4223, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733709220097 2024-12-09T01:53:52,195 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 65581665fff04b2d9b5d19ef9c822dfb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733709230157 2024-12-09T01:53:52,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/9cb788ddbb4d435d9c909a01fbf12a95 is 1080, key is row0167/info:/1733709232170/Put/seqid=0 2024-12-09T01:53:52,203 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:52,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741864_1040 (size=21156) 2024-12-09T01:53:52,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741864_1040 (size=21156) 2024-12-09T01:53:52,211 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59d8a49309daf32c4dda194bc963c4f3#info#compaction#79 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:53:52,212 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/ffd1818e1a684e7e940a2aaf62df3f7e is 1080, key is row0062/info:/1733709203868/Put/seqid=0 2024-12-09T01:53:52,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/9cb788ddbb4d435d9c909a01fbf12a95 2024-12-09T01:53:52,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741865_1041 (size=118996) 2024-12-09T01:53:52,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741865_1041 (size=118996) 2024-12-09T01:53:52,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/9cb788ddbb4d435d9c909a01fbf12a95 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/9cb788ddbb4d435d9c909a01fbf12a95 2024-12-09T01:53:52,223 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/ffd1818e1a684e7e940a2aaf62df3f7e as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/ffd1818e1a684e7e940a2aaf62df3f7e 2024-12-09T01:53:52,224 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/9cb788ddbb4d435d9c909a01fbf12a95, entries=15, sequenceid=235, filesize=20.7 K 2024-12-09T01:53:52,226 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 59d8a49309daf32c4dda194bc963c4f3 in 32ms, sequenceid=235, compaction requested=false 2024-12-09T01:53:52,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:52,229 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 59d8a49309daf32c4dda194bc963c4f3/info of 59d8a49309daf32c4dda194bc963c4f3 into ffd1818e1a684e7e940a2aaf62df3f7e(size=116.2 K), total size for store is 136.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-09T01:53:52,229 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:52,229 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., storeName=59d8a49309daf32c4dda194bc963c4f3/info, priority=13, startTime=1733709232192; duration=0sec 2024-12-09T01:53:52,229 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:52,229 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59d8a49309daf32c4dda194bc963c4f3:info 2024-12-09T01:53:52,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:53,022 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:53,204 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:53,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:54,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:54,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:54,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:53:54,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-09T01:53:54,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/b06c6089b8b4449f9044e767071e706c is 1080, key is row0182/info:/1733709232194/Put/seqid=0 2024-12-09T01:53:54,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741866_1042 (size=17906) 2024-12-09T01:53:54,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741866_1042 (size=17906) 2024-12-09T01:53:54,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-09T01:53:54,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47262 deadline: 1733709244246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:53:54,247 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:53:54,247 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:53:54,247 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127 because the exception is null or not the one we care about 2024-12-09T01:53:54,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:53:54,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/b06c6089b8b4449f9044e767071e706c 2024-12-09T01:53:54,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/b06c6089b8b4449f9044e767071e706c as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b06c6089b8b4449f9044e767071e706c 2024-12-09T01:53:54,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b06c6089b8b4449f9044e767071e706c, entries=12, sequenceid=251, filesize=17.5 K 2024-12-09T01:53:54,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 59d8a49309daf32c4dda194bc963c4f3 in 434ms, sequenceid=251, compaction requested=true 2024-12-09T01:53:54,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:54,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59d8a49309daf32c4dda194bc963c4f3:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:53:54,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:54,653 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:53:54,653 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 158058 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:53:54,653 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1541): 59d8a49309daf32c4dda194bc963c4f3/info is initiating minor compaction (all files) 2024-12-09T01:53:54,653 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 59d8a49309daf32c4dda194bc963c4f3/info in TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 
2024-12-09T01:53:54,654 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/ffd1818e1a684e7e940a2aaf62df3f7e, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/9cb788ddbb4d435d9c909a01fbf12a95, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b06c6089b8b4449f9044e767071e706c] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp, totalSize=154.4 K 2024-12-09T01:53:54,654 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting ffd1818e1a684e7e940a2aaf62df3f7e, keycount=105, bloomtype=ROW, size=116.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1733709203868 2024-12-09T01:53:54,654 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9cb788ddbb4d435d9c909a01fbf12a95, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733709232170 2024-12-09T01:53:54,655 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting b06c6089b8b4449f9044e767071e706c, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733709232194 2024-12-09T01:53:54,666 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59d8a49309daf32c4dda194bc963c4f3#info#compaction#81 average throughput is 45.15 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:53:54,667 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/da2529d12cbd42e0b9cce10f501d29dc is 1080, key is row0062/info:/1733709203868/Put/seqid=0 2024-12-09T01:53:54,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741867_1043 (size=148409) 2024-12-09T01:53:54,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741867_1043 (size=148409) 2024-12-09T01:53:54,678 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/da2529d12cbd42e0b9cce10f501d29dc as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/da2529d12cbd42e0b9cce10f501d29dc 2024-12-09T01:53:54,684 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 59d8a49309daf32c4dda194bc963c4f3/info of 59d8a49309daf32c4dda194bc963c4f3 into da2529d12cbd42e0b9cce10f501d29dc(size=144.9 K), total size for store is 144.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T01:53:54,684 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:53:54,684 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., storeName=59d8a49309daf32c4dda194bc963c4f3/info, priority=13, startTime=1733709234652; duration=0sec 2024-12-09T01:53:54,684 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:53:54,684 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59d8a49309daf32c4dda194bc963c4f3:info 2024-12-09T01:53:55,023 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:55,205 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:55,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:56,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:56,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:56,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:57,024 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:57,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:57,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:58,025 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:58,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:58,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:59,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:59,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:53:59,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:00,026 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:00,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:00,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:00,745 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-09T01:54:01,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:01,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:01,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:02,027 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:02,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:02,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:03,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:03,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:03,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:54:03,508 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=3, created chunk count=9, reused chunk count=67, reuseRatio=88.16% 2024-12-09T01:54:03,509 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-09T01:54:04,028 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:04,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:04,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:04,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:54:04,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-12-09T01:54:04,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/3c8bb465a4324e2e940a207d796b8ab6 is 1080, key is row0194/info:/1733709234219/Put/seqid=0 2024-12-09T01:54:04,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741868_1044 (size=24409) 2024-12-09T01:54:04,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741868_1044 (size=24409) 2024-12-09T01:54:04,298 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=273 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/3c8bb465a4324e2e940a207d796b8ab6 2024-12-09T01:54:04,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/3c8bb465a4324e2e940a207d796b8ab6 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/3c8bb465a4324e2e940a207d796b8ab6 2024-12-09T01:54:04,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-09T01:54:04,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:47262 deadline: 1733709254308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 2024-12-09T01:54:04,309 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127 , the old value is region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:54:04,309 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=59d8a49309daf32c4dda194bc963c4f3, server=ef6f18c58dc9,34803,1733709180806 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-09T01:54:04,309 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., hostname=ef6f18c58dc9,34803,1733709180806, seqNum=127 because the exception is null or not the one we care about 2024-12-09T01:54:04,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/3c8bb465a4324e2e940a207d796b8ab6, entries=18, sequenceid=273, filesize=23.8 K 2024-12-09T01:54:04,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=11.56 KB/11836 for 59d8a49309daf32c4dda194bc963c4f3 in 25ms, sequenceid=273, compaction requested=false 2024-12-09T01:54:04,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:54:05,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:05,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:05,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:06,029 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:06,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:06,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:07,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:07,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:07,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:08,030 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:08,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:08,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:09,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:09,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:09,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:10,031 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:10,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,169 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,170 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,192 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,194 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:10,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:10,700 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T01:54:10,700 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,722 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,725 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:10,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T01:54:11,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:11,032 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=3 on file=hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 after 196132ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T01:54:11,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:11,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:11,612 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 59d8a49309daf32c4dda194bc963c4f3, had cached 0 bytes from a total of 172818 2024-12-09T01:54:11,650 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region c6ba1446e1f5c7a309d87608150019a6, had cached 0 bytes from a total of 70862 2024-12-09T01:54:12,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:54:12,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:12,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:13,033 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:54:13,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:13,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:14,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:54:14,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:14,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:14,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:54:14,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-12-09T01:54:14,402 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/1eeed9d1f89a48cc84c97bd1ee6350b6 is 1080, key is row0212/info:/1733709244288/Put/seqid=0 2024-12-09T01:54:14,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741869_1045 (size=17918) 2024-12-09T01:54:14,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741869_1045 (size=17918) 2024-12-09T01:54:14,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/1eeed9d1f89a48cc84c97bd1ee6350b6 2024-12-09T01:54:14,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/1eeed9d1f89a48cc84c97bd1ee6350b6 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/1eeed9d1f89a48cc84c97bd1ee6350b6 2024-12-09T01:54:14,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/1eeed9d1f89a48cc84c97bd1ee6350b6, entries=12, sequenceid=288, filesize=17.5 K 2024-12-09T01:54:14,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, 
currentSize=1.05 KB/1076 for 59d8a49309daf32c4dda194bc963c4f3 in 24ms, sequenceid=288, compaction requested=true 2024-12-09T01:54:14,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:54:14,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59d8a49309daf32c4dda194bc963c4f3:info, priority=-2147483648, current under compaction store size is 1 2024-12-09T01:54:14,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:54:14,420 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-09T01:54:14,421 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190736 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-09T01:54:14,421 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1541): 59d8a49309daf32c4dda194bc963c4f3/info is initiating minor compaction (all files) 2024-12-09T01:54:14,421 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 59d8a49309daf32c4dda194bc963c4f3/info in TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:54:14,421 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/da2529d12cbd42e0b9cce10f501d29dc, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/3c8bb465a4324e2e940a207d796b8ab6, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/1eeed9d1f89a48cc84c97bd1ee6350b6] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp, totalSize=186.3 K 2024-12-09T01:54:14,422 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting da2529d12cbd42e0b9cce10f501d29dc, keycount=132, bloomtype=ROW, size=144.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733709203868 2024-12-09T01:54:14,422 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3c8bb465a4324e2e940a207d796b8ab6, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=273, earliestPutTs=1733709234219 2024-12-09T01:54:14,423 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1eeed9d1f89a48cc84c97bd1ee6350b6, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733709244288 2024-12-09T01:54:14,434 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59d8a49309daf32c4dda194bc963c4f3#info#compaction#84 average throughput is 55.41 MB/second, slept 0 time(s) and total slept time 
is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:54:14,435 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/71ba5276362244f6a9c6c5544a0d3e50 is 1080, key is row0062/info:/1733709203868/Put/seqid=0 2024-12-09T01:54:14,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741870_1046 (size=180886) 2024-12-09T01:54:14,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741870_1046 (size=180886) 2024-12-09T01:54:14,454 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/71ba5276362244f6a9c6c5544a0d3e50 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/71ba5276362244f6a9c6c5544a0d3e50 2024-12-09T01:54:14,460 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 59d8a49309daf32c4dda194bc963c4f3/info of 59d8a49309daf32c4dda194bc963c4f3 into 71ba5276362244f6a9c6c5544a0d3e50(size=176.6 K), total size for store is 176.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T01:54:14,460 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:54:14,460 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., storeName=59d8a49309daf32c4dda194bc963c4f3/info, priority=13, startTime=1733709254420; duration=0sec 2024-12-09T01:54:14,460 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:54:14,460 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59d8a49309daf32c4dda194bc963c4f3:info 2024-12-09T01:54:15,034 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:15,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:15,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:16,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:16,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:16,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T01:54:16,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3
2024-12-09T01:54:16,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-12-09T01:54:16,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/4d7aec9f378a49fbba63ff8696dc4798 is 1080, key is row0224/info:/1733709254398/Put/seqid=0
2024-12-09T01:54:16,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741871_1047 (size=12523)
2024-12-09T01:54:16,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741871_1047 (size=12523)
2024-12-09T01:54:16,427 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/4d7aec9f378a49fbba63ff8696dc4798
2024-12-09T01:54:16,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/4d7aec9f378a49fbba63ff8696dc4798 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4d7aec9f378a49fbba63ff8696dc4798
2024-12-09T01:54:16,438 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4d7aec9f378a49fbba63ff8696dc4798, entries=7, sequenceid=299, filesize=12.2 K
2024-12-09T01:54:16,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=21.02 KB/21520 for 59d8a49309daf32c4dda194bc963c4f3 in 31ms, sequenceid=299, compaction requested=false
2024-12-09T01:54:16,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3:
2024-12-09T01:54:16,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34803 {}] regionserver.HRegion(8855): Flush requested on 59d8a49309daf32c4dda194bc963c4f3
2024-12-09T01:54:16,440 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=22.07 KB heapSize=23.88 KB
2024-12-09T01:54:16,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/6f8c977a2ff04b3ea7aaf2a6849d08be is 1080, key is row0231/info:/1733709256409/Put/seqid=0
2024-12-09T01:54:16,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741872_1048 (size=27649)
2024-12-09T01:54:16,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741872_1048 (size=27649)
2024-12-09T01:54:16,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.07 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/6f8c977a2ff04b3ea7aaf2a6849d08be
2024-12-09T01:54:16,456 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/6f8c977a2ff04b3ea7aaf2a6849d08be as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/6f8c977a2ff04b3ea7aaf2a6849d08be
2024-12-09T01:54:16,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/6f8c977a2ff04b3ea7aaf2a6849d08be, entries=21, sequenceid=323, filesize=27.0 K
2024-12-09T01:54:16,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~22.07 KB/22596, heapSize ~23.86 KB/24432, currentSize=5.25 KB/5380 for 59d8a49309daf32c4dda194bc963c4f3 in 21ms, sequenceid=323, compaction requested=true
2024-12-09T01:54:16,461 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3:
2024-12-09T01:54:16,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59d8a49309daf32c4dda194bc963c4f3:info, priority=-2147483648, current under compaction store size is 1
2024-12-09T01:54:16,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-09T01:54:16,461 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-12-09T01:54:16,462 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 221058 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-09T01:54:16,462 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1541): 59d8a49309daf32c4dda194bc963c4f3/info is initiating minor compaction (all files)
2024-12-09T01:54:16,462 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 59d8a49309daf32c4dda194bc963c4f3/info in TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.
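The flush and the follow-up compaction above are triggered automatically by the region server (MemStoreFlusher and CompactSplit). For orientation only, the same two operations can also be requested from a client through the Admin API; the sketch below is not part of the test run logged here, it simply reuses the table name from the log, and the configuration is assumed to come from an hbase-site.xml on the classpath:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/**
 * Illustrative sketch only: a client-side analogue of the flush and minor
 * compaction the region server performed on its own in the log above.
 */
public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    // Assumes cluster connection details are available via hbase-site.xml.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.flush(table);   // persist the current memstore contents as a new HFile
      admin.compact(table); // ask the region server to queue a compaction of the store files
    }
  }
}
```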
2024-12-09T01:54:16,462 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/71ba5276362244f6a9c6c5544a0d3e50, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4d7aec9f378a49fbba63ff8696dc4798, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/6f8c977a2ff04b3ea7aaf2a6849d08be] into tmpdir=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp, totalSize=215.9 K 2024-12-09T01:54:16,463 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 71ba5276362244f6a9c6c5544a0d3e50, keycount=162, bloomtype=ROW, size=176.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1733709203868 2024-12-09T01:54:16,463 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4d7aec9f378a49fbba63ff8696dc4798, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1733709254398 2024-12-09T01:54:16,463 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6f8c977a2ff04b3ea7aaf2a6849d08be, keycount=21, bloomtype=ROW, size=27.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1733709256409 2024-12-09T01:54:16,475 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59d8a49309daf32c4dda194bc963c4f3#info#compaction#87 average throughput is 64.99 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-09T01:54:16,475 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/7bcef93322424c7992c2ed8ef891fc14 is 1080, key is row0062/info:/1733709203868/Put/seqid=0 2024-12-09T01:54:16,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741873_1049 (size=211277) 2024-12-09T01:54:16,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741873_1049 (size=211277) 2024-12-09T01:54:16,484 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/7bcef93322424c7992c2ed8ef891fc14 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/7bcef93322424c7992c2ed8ef891fc14 2024-12-09T01:54:16,489 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 59d8a49309daf32c4dda194bc963c4f3/info of 59d8a49309daf32c4dda194bc963c4f3 into 7bcef93322424c7992c2ed8ef891fc14(size=206.3 K), total size for store is 206.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-09T01:54:16,489 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 59d8a49309daf32c4dda194bc963c4f3: 2024-12-09T01:54:16,489 INFO [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3., storeName=59d8a49309daf32c4dda194bc963c4f3/info, priority=13, startTime=1733709256461; duration=0sec 2024-12-09T01:54:16,489 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-09T01:54:16,489 DEBUG [RS:0;ef6f18c58dc9:34803-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59d8a49309daf32c4dda194bc963c4f3:info 2024-12-09T01:54:17,035 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:17,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:17,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:18,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:18,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:18,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-12-09T01:54:18,450 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files
2024-12-09T01:54:18,450 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C34803%2C1733709180806.1733709258450
2024-12-09T01:54:18,467 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,467 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,467 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,467 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,467 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,468 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.1733709181197 with entries=310, filesize=307.89 KB; new WAL /user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.1733709258450
2024-12-09T01:54:18,468 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38505:38505),(127.0.0.1/127.0.0.1:33151:33151)]
2024-12-09T01:54:18,469 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.1733709181197 is not closed yet, will try archiving it next time
2024-12-09T01:54:18,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741833_1009 (size=315283)
2024-12-09T01:54:18,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741833_1009 (size=315283)
2024-12-09T01:54:18,472 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c6ba1446e1f5c7a309d87608150019a6:
2024-12-09T01:54:18,473 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB
2024-12-09T01:54:18,477 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/info/0f06122f56424ad5b7349024241fdfce is 186, key is TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6./info:regioninfo/1733709206683/Put/seqid=0
2024-12-09T01:54:18,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741875_1051 (size=6153)
2024-12-09T01:54:18,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741875_1051 (size=6153)
2024-12-09T01:54:18,482 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/info/0f06122f56424ad5b7349024241fdfce
2024-12-09T01:54:18,487 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/.tmp/info/0f06122f56424ad5b7349024241fdfce as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/info/0f06122f56424ad5b7349024241fdfce
2024-12-09T01:54:18,492 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/info/0f06122f56424ad5b7349024241fdfce, entries=5, sequenceid=21, filesize=6.0 K
2024-12-09T01:54:18,493 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 21ms, sequenceid=21, compaction requested=false
2024-12-09T01:54:18,493 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-12-09T01:54:18,493 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 59d8a49309daf32c4dda194bc963c4f3 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB
2024-12-09T01:54:18,497 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/e4515da95f414330afb5f534b263a847 is 1080, key is row0252/info:/1733709256441/Put/seqid=0
2024-12-09T01:54:18,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741876_1052 (size=10357)
2024-12-09T01:54:18,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741876_1052 (size=10357)
2024-12-09T01:54:18,504 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/e4515da95f414330afb5f534b263a847
2024-12-09T01:54:18,509 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/.tmp/info/e4515da95f414330afb5f534b263a847 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/e4515da95f414330afb5f534b263a847
2024-12-09T01:54:18,513 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/e4515da95f414330afb5f534b263a847, entries=5, sequenceid=332, filesize=10.1 K
2024-12-09T01:54:18,514 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for 59d8a49309daf32c4dda194bc963c4f3 in 21ms, sequenceid=332, compaction requested=false
2024-12-09T01:54:18,514 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 59d8a49309daf32c4dda194bc963c4f3:
2024-12-09T01:54:18,515 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C34803%2C1733709180806.1733709258515
2024-12-09T01:54:18,523 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,523 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,524 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,524 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,524 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-09T01:54:18,524 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.1733709258450 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.1733709258515
2024-12-09T01:54:18,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741874_1050 (size=731)
2024-12-09T01:54:18,526 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33151:33151),(127.0.0.1/127.0.0.1:38505:38505)]
2024-12-09T01:54:18,526 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.1733709258450 is not closed yet, will try archiving it next time
2024-12-09T01:54:18,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741874_1050 (size=731)
2024-12-09T01:54:18,528 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.1733709181197 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/oldWALs/ef6f18c58dc9%2C34803%2C1733709180806.1733709181197
2024-12-09T01:54:18,528 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1])
2024-12-09T01:54:18,529 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-12-09T01:54:18,529 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
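Rolling a WAL swaps in a new writer and then closes the old file on the Close-WAL-Writer thread; when that close cannot complete, HBase falls back to lease recovery and polls isFileClosed through reflection, which is exactly what produces the repeated "Failed invocation ... Filesystem closed" warnings earlier in this log (the stack traces show AbstractFSWAL.closeWriter calling RecoverLeaseFSUtils, and the cause is a DFSClient that has already been shut down, so the poll keeps failing roughly once per second). The following is a simplified, hedged sketch of that recover-and-poll pattern, not the actual RecoverLeaseFSUtils implementation; it only assumes the standard hadoop-hdfs-client APIs:

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/**
 * Simplified illustration of recovering the lease on an old WAL file and
 * polling until HDFS reports it closed. Not HBase's real utility class.
 */
public class LeaseRecoverySketch {

  static boolean recoverLease(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    if (!(fs instanceof DistributedFileSystem)) {
      return true; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      boolean recovered = false;
      try {
        recovered = dfs.recoverLease(wal); // true once the file is closed
      } catch (IOException e) {
        // e.g. "Filesystem closed"; the real utility logs a WARN and retries
      }
      if (recovered) {
        return true;
      }
      // Poll isFileClosed reflectively, since very old Hadoop releases lack it.
      try {
        Method isFileClosed = dfs.getClass().getMethod("isFileClosed", Path.class);
        if ((Boolean) isFileClosed.invoke(dfs, wal)) {
          return true;
        }
      } catch (NoSuchMethodException e) {
        // method unavailable; just retry recoverLease on the next pass
      } catch (IllegalAccessException | InvocationTargetException e) {
        // an InvocationTargetException caused by "Filesystem closed" is what
        // the repeated WARN lines above correspond to
      }
      Thread.sleep(1000L); // matches the ~one-second cadence in the timestamps
    }
    return false;
  }
}
```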
2024-12-09T01:54:18,529 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/WALs/ef6f18c58dc9,34803,1733709180806/ef6f18c58dc9%2C34803%2C1733709180806.1733709258450 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/oldWALs/ef6f18c58dc9%2C34803%2C1733709180806.1733709258450 2024-12-09T01:54:18,529 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:54:18,529 DEBUG [Time-limited test {}] 
ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:18,529 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:18,529 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T01:54:18,529 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T01:54:18,529 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1265953606, stopped=false 2024-12-09T01:54:18,529 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef6f18c58dc9,33543,1733709180763 2024-12-09T01:54:18,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:54:18,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:18,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:54:18,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:18,531 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:54:18,532 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
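Every entry in this stream has the same shape: timestamp, level, [thread {}], source class with a line number, then the message. A small sketch of splitting one entry into those fields follows; the field names and the regular expression are my own assumptions, not anything defined by HBase or Log4j.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical splitter for one entry of the form seen above, e.g.
//   2024-12-09T01:54:18,529 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): message
public class LogEntry {
    private static final Pattern ENTRY = Pattern.compile(
        "(\\d{4}-\\d{2}-\\d{2}T[\\d:,]+) (\\w+) \\[(.+?) \\{\\}\\] ([\\w.$]+)\\((\\d+)\\): (.+)");

    public static void main(String[] args) {
        String line = "2024-12-09T01:54:18,529 DEBUG [Time-limited test {}] "
            + "util.JVMClusterUtil(237): Shutting down HBase Cluster";
        Matcher m = ENTRY.matcher(line);
        if (m.matches()) {
            System.out.println("ts=" + m.group(1));
            System.out.println("level=" + m.group(2));
            System.out.println("thread=" + m.group(3));
            System.out.println("source=" + m.group(4) + ":" + m.group(5));
            System.out.println("msg=" + m.group(6));
        }
    }
}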
2024-12-09T01:54:18,532 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:54:18,532 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:18,532 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,34803,1733709180806' ***** 2024-12-09T01:54:18,532 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:54:18,532 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:54:18,532 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:54:18,533 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(3091): Received CLOSE for c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(3091): Received CLOSE for 59d8a49309daf32c4dda194bc963c4f3 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,34803,1733709180806 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef6f18c58dc9:34803. 2024-12-09T01:54:18,533 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:54:18,533 DEBUG [RS:0;ef6f18c58dc9:34803 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:54:18,533 DEBUG [RS:0;ef6f18c58dc9:34803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:18,533 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(1722): Closing c6ba1446e1f5c7a309d87608150019a6, disabling compactions & flushes 2024-12-09T01:54:18,533 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:54:18,533 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T01:54:18,533 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. after waiting 0 ms 2024-12-09T01:54:18,533 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 2024-12-09T01:54:18,533 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T01:54:18,536 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-12-09T01:54:18,536 DEBUG [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(1325): Online Regions={c6ba1446e1f5c7a309d87608150019a6=TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6., 1588230740=hbase:meta,,1.1588230740, 59d8a49309daf32c4dda194bc963c4f3=TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.} 2024-12-09T01:54:18,536 DEBUG [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 59d8a49309daf32c4dda194bc963c4f3, c6ba1446e1f5c7a309d87608150019a6 2024-12-09T01:54:18,536 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:54:18,536 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:54:18,536 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:54:18,536 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:54:18,536 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6.-1 {}] regionserver.HStore(2317): Moving the files 
[hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707->hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/225030c839dc4db3a3b3e2ee4c255c2b-bottom] to archive 2024-12-09T01:54:18,536 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:54:18,537 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-09T01:54:18,539 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:54:18,539 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=ef6f18c58dc9:33543 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-12-09T01:54:18,539 WARN [StoreCloser-TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-12-09T01:54:18,544 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-12-09T01:54:18,545 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:54:18,545 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:54:18,545 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709258536Running coprocessor pre-close hooks at 1733709258536Disabling compacts and flushes for region at 1733709258536Disabling writes for close at 1733709258536Writing region close event to WAL at 1733709258538 (+2 ms)Running coprocessor post-close hooks at 1733709258545 (+7 ms)Closed at 1733709258545 2024-12-09T01:54:18,545 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T01:54:18,547 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/c6ba1446e1f5c7a309d87608150019a6/recovered.edits/131.seqid, newMaxSeqId=131, maxSeqId=126 2024-12-09T01:54:18,547 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 2024-12-09T01:54:18,547 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c6ba1446e1f5c7a309d87608150019a6: Waiting for close lock at 1733709258533Running coprocessor pre-close hooks at 1733709258533Disabling compacts and flushes for region at 1733709258533Disabling writes for close at 1733709258533Writing region close event to WAL at 1733709258544 (+11 ms)Running coprocessor post-close hooks at 1733709258547 (+3 ms)Closed at 1733709258547 2024-12-09T01:54:18,548 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1733709205960.c6ba1446e1f5c7a309d87608150019a6. 2024-12-09T01:54:18,548 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 59d8a49309daf32c4dda194bc963c4f3, disabling compactions & flushes 2024-12-09T01:54:18,548 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 
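The close journals above carry their own timings: each step records the epoch-millisecond at which it ran, and the "(+N ms)" annotations are simply the differences between consecutive steps (the hbase:meta close, for example, spends 2 ms writing the close event to the WAL and 7 ms in post-close coprocessor hooks). A tiny reconstruction of that arithmetic, using the timestamps from the meta journal above, is sketched below; it is illustrative, not HBase code.

import java.util.LinkedHashMap;
import java.util.Map;

// Recomputes the "(+N ms)" deltas of a region close journal from its step
// timestamps (values copied from the hbase:meta journal logged above).
public class CloseJournal {
    public static void main(String[] args) {
        Map<String, Long> journal = new LinkedHashMap<>();
        journal.put("Waiting for close lock", 1733709258536L);
        journal.put("Disabling writes for close", 1733709258536L);
        journal.put("Writing region close event to WAL", 1733709258538L);
        journal.put("Running coprocessor post-close hooks", 1733709258545L);
        journal.put("Closed", 1733709258545L);

        Long prev = null;
        for (Map.Entry<String, Long> step : journal.entrySet()) {
            long delta = prev == null ? 0 : step.getValue() - prev;
            System.out.println(step.getKey() + " at " + step.getValue()
                + (delta > 0 ? " (+" + delta + " ms)" : ""));
            prev = step.getValue();
        }
    }
}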
2024-12-09T01:54:18,548 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:54:18,548 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. after waiting 0 ms 2024-12-09T01:54:18,548 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:54:18,548 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707->hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/016b1ee3f2107d7f0bb81894adf8f707/info/225030c839dc4db3a3b3e2ee4c255c2b-top, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-1501ce5fc0254636a9b491e198eabf55, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-c1d7461714734236a5870e2e1645a5c2, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/26a24ff9c02a4f4fba879844622ef9b3, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-b23851a53c7e4c699e50179884d33738, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4a5694bea3954b81960c8157d9852bab, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/7e2a73c6aab343a1a21007395d8dca85, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/73fe4ae2a8fa4cdcbd4af6a5b3af6086, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/a7e624fffb784e43b7e4c6a4a516f6b0, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/100e8268ff4848b6add70e86adfb950b, 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4b4f8108f9884643a97244b533ac97ba, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b34eabc108cf4006ae5f82a3798a4223, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/ffd1818e1a684e7e940a2aaf62df3f7e, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/65581665fff04b2d9b5d19ef9c822dfb, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/9cb788ddbb4d435d9c909a01fbf12a95, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/da2529d12cbd42e0b9cce10f501d29dc, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b06c6089b8b4449f9044e767071e706c, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/3c8bb465a4324e2e940a207d796b8ab6, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/71ba5276362244f6a9c6c5544a0d3e50, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/1eeed9d1f89a48cc84c97bd1ee6350b6, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4d7aec9f378a49fbba63ff8696dc4798, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/6f8c977a2ff04b3ea7aaf2a6849d08be] to archive 2024-12-09T01:54:18,550 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
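Each move logged by HFileArchiver above keeps the table/region/family/file suffix intact and only relocates the file from the data tree into the archive tree under the same test-data root. A one-line sketch of that mapping follows; the helper class and the shortened "ROOT" path segment are hypothetical, only the prefix substitution mirrors what the log shows.

// Hypothetical helper mirroring the data -> archive prefix substitution seen in
// the HFileArchiver entries above; not HBase's own path logic.
public class ArchivePath {
    static String toArchive(String storeFile) {
        return storeFile.replaceFirst("/data/default/", "/archive/data/default/");
    }

    public static void main(String[] args) {
        // Shortened, illustrative path in the shape used by the test above.
        String src = "hdfs://localhost:36289/user/jenkins/test-data/ROOT/data/default/"
            + "TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/26a24ff9c02a4f4fba879844622ef9b3";
        System.out.println(toArchive(src));
        // .../test-data/ROOT/archive/data/default/TestLogRolling-testLogRolling/59d8a.../info/26a24...
    }
}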
2024-12-09T01:54:18,551 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/225030c839dc4db3a3b3e2ee4c255c2b.016b1ee3f2107d7f0bb81894adf8f707 2024-12-09T01:54:18,552 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-1501ce5fc0254636a9b491e198eabf55 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-1501ce5fc0254636a9b491e198eabf55 2024-12-09T01:54:18,553 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-c1d7461714734236a5870e2e1645a5c2 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-c1d7461714734236a5870e2e1645a5c2 2024-12-09T01:54:18,554 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/26a24ff9c02a4f4fba879844622ef9b3 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/26a24ff9c02a4f4fba879844622ef9b3 2024-12-09T01:54:18,556 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-b23851a53c7e4c699e50179884d33738 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/TestLogRolling-testLogRolling=016b1ee3f2107d7f0bb81894adf8f707-b23851a53c7e4c699e50179884d33738 2024-12-09T01:54:18,557 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4a5694bea3954b81960c8157d9852bab to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4a5694bea3954b81960c8157d9852bab 2024-12-09T01:54:18,558 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/7e2a73c6aab343a1a21007395d8dca85 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/7e2a73c6aab343a1a21007395d8dca85 2024-12-09T01:54:18,559 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/73fe4ae2a8fa4cdcbd4af6a5b3af6086 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/73fe4ae2a8fa4cdcbd4af6a5b3af6086 2024-12-09T01:54:18,560 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/a7e624fffb784e43b7e4c6a4a516f6b0 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/a7e624fffb784e43b7e4c6a4a516f6b0 2024-12-09T01:54:18,561 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/100e8268ff4848b6add70e86adfb950b to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/100e8268ff4848b6add70e86adfb950b 2024-12-09T01:54:18,563 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4b4f8108f9884643a97244b533ac97ba to 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4b4f8108f9884643a97244b533ac97ba 2024-12-09T01:54:18,564 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b34eabc108cf4006ae5f82a3798a4223 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b34eabc108cf4006ae5f82a3798a4223 2024-12-09T01:54:18,565 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/ffd1818e1a684e7e940a2aaf62df3f7e to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/ffd1818e1a684e7e940a2aaf62df3f7e 2024-12-09T01:54:18,566 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/65581665fff04b2d9b5d19ef9c822dfb to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/65581665fff04b2d9b5d19ef9c822dfb 2024-12-09T01:54:18,567 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/9cb788ddbb4d435d9c909a01fbf12a95 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/9cb788ddbb4d435d9c909a01fbf12a95 2024-12-09T01:54:18,568 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/da2529d12cbd42e0b9cce10f501d29dc to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/da2529d12cbd42e0b9cce10f501d29dc 2024-12-09T01:54:18,569 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b06c6089b8b4449f9044e767071e706c to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/b06c6089b8b4449f9044e767071e706c 2024-12-09T01:54:18,570 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/3c8bb465a4324e2e940a207d796b8ab6 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/3c8bb465a4324e2e940a207d796b8ab6 2024-12-09T01:54:18,571 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/71ba5276362244f6a9c6c5544a0d3e50 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/71ba5276362244f6a9c6c5544a0d3e50 2024-12-09T01:54:18,572 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/1eeed9d1f89a48cc84c97bd1ee6350b6 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/1eeed9d1f89a48cc84c97bd1ee6350b6 2024-12-09T01:54:18,574 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4d7aec9f378a49fbba63ff8696dc4798 to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/4d7aec9f378a49fbba63ff8696dc4798 2024-12-09T01:54:18,575 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/6f8c977a2ff04b3ea7aaf2a6849d08be to hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/archive/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/info/6f8c977a2ff04b3ea7aaf2a6849d08be 2024-12-09T01:54:18,575 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [26a24ff9c02a4f4fba879844622ef9b3=43081, 4a5694bea3954b81960c8157d9852bab=12516, 7e2a73c6aab343a1a21007395d8dca85=68045, 73fe4ae2a8fa4cdcbd4af6a5b3af6086=22238, a7e624fffb784e43b7e4c6a4a516f6b0=15750, 100e8268ff4848b6add70e86adfb950b=97330, 4b4f8108f9884643a97244b533ac97ba=23316, b34eabc108cf4006ae5f82a3798a4223=19000, ffd1818e1a684e7e940a2aaf62df3f7e=118996, 65581665fff04b2d9b5d19ef9c822dfb=12516, 9cb788ddbb4d435d9c909a01fbf12a95=21156, da2529d12cbd42e0b9cce10f501d29dc=148409, b06c6089b8b4449f9044e767071e706c=17906, 3c8bb465a4324e2e940a207d796b8ab6=24409, 71ba5276362244f6a9c6c5544a0d3e50=180886, 1eeed9d1f89a48cc84c97bd1ee6350b6=17918, 4d7aec9f378a49fbba63ff8696dc4798=12523, 6f8c977a2ff04b3ea7aaf2a6849d08be=27649] 2024-12-09T01:54:18,578 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/data/default/TestLogRolling-testLogRolling/59d8a49309daf32c4dda194bc963c4f3/recovered.edits/335.seqid, newMaxSeqId=335, maxSeqId=126 2024-12-09T01:54:18,579 INFO [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:54:18,579 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 59d8a49309daf32c4dda194bc963c4f3: Waiting for close lock at 1733709258548Running coprocessor pre-close hooks at 1733709258548Disabling compacts and flushes for region at 1733709258548Disabling writes for close at 1733709258548Writing region close event to WAL at 1733709258575 (+27 ms)Running coprocessor post-close hooks at 1733709258579 (+4 ms)Closed at 1733709258579 2024-12-09T01:54:18,579 DEBUG [RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1733709205960.59d8a49309daf32c4dda194bc963c4f3. 2024-12-09T01:54:18,736 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,34803,1733709180806; all regions closed. 
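Between "Waiting on 3 regions to close" and "stopping server ...; all regions closed" the server is essentially draining its online-region map as each RS_CLOSE_REGION / RS_CLOSE_META handler finishes. A toy model of that bookkeeping, seeded with the three region names from this run, is sketched below; it is a simplification for illustration, not the HRegionServer implementation.

import java.util.LinkedHashSet;
import java.util.Set;

// Toy bookkeeping only: the shutdown loop in the log waits for this set to
// drain as close handlers finish, then reports that all regions are closed.
public class RegionCloseTracker {
    public static void main(String[] args) {
        Set<String> online = new LinkedHashSet<>();
        online.add("c6ba1446e1f5c7a309d87608150019a6");   // TestLogRolling-testLogRolling,,...
        online.add("1588230740");                          // hbase:meta
        online.add("59d8a49309daf32c4dda194bc963c4f3");    // TestLogRolling-testLogRolling,row0062,...

        System.out.println("Waiting on " + online.size() + " regions to close");
        for (String region : Set.copyOf(online)) {
            online.remove(region);                         // a close handler finished
            System.out.println("Closed " + region + ", still open: " + online.size());
        }
        System.out.println("all regions closed = " + online.isEmpty());
    }
}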
2024-12-09T01:54:18,737 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,737 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,737 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,737 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,737 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741834_1010 (size=8107) 2024-12-09T01:54:18,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741834_1010 (size=8107) 2024-12-09T01:54:18,742 DEBUG [RS:0;ef6f18c58dc9:34803 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/oldWALs 2024-12-09T01:54:18,742 INFO [RS:0;ef6f18c58dc9:34803 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C34803%2C1733709180806.meta:.meta(num 1733709181575) 2024-12-09T01:54:18,742 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,742 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,742 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,743 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,743 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741877_1053 (size=780) 2024-12-09T01:54:18,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741877_1053 (size=780) 2024-12-09T01:54:18,746 DEBUG [RS:0;ef6f18c58dc9:34803 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/oldWALs 2024-12-09T01:54:18,746 INFO [RS:0;ef6f18c58dc9:34803 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C34803%2C1733709180806:(num 1733709258515) 2024-12-09T01:54:18,746 DEBUG [RS:0;ef6f18c58dc9:34803 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:18,746 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:54:18,747 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:54:18,747 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T01:54:18,747 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:54:18,747 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
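The WAL names being closed and archived here are the server name with its commas percent-encoded, a dot, and the roll timestamp; the meta WAL additionally carries a ".meta" suffix. Decoding one of the names from this run is a one-liner, sketched below as an illustration rather than as HBase's own parser.

import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;

// Illustrative decode of a WAL file name from this run; not HBase's parsing code.
public class WalFileName {
    public static void main(String[] args) {
        String name = "ef6f18c58dc9%2C34803%2C1733709180806.1733709258515";
        String decoded = URLDecoder.decode(name, StandardCharsets.UTF_8);
        int dot = decoded.lastIndexOf('.');
        String[] server = decoded.substring(0, dot).split(",");
        System.out.println("host=" + server[0]);                         // ef6f18c58dc9
        System.out.println("port=" + server[1]);                         // 34803
        System.out.println("startcode=" + server[2]);                    // 1733709180806
        System.out.println("rolled at=" + decoded.substring(dot + 1));   // 1733709258515
    }
}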
2024-12-09T01:54:18,747 INFO [RS:0;ef6f18c58dc9:34803 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34803 2024-12-09T01:54:18,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:54:18,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,34803,1733709180806 2024-12-09T01:54:18,749 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:54:18,751 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,34803,1733709180806] 2024-12-09T01:54:18,752 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,34803,1733709180806 already deleted, retry=false 2024-12-09T01:54:18,752 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,34803,1733709180806 expired; onlineServers=0 2024-12-09T01:54:18,752 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef6f18c58dc9,33543,1733709180763' ***** 2024-12-09T01:54:18,752 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T01:54:18,752 INFO [M:0;ef6f18c58dc9:33543 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:54:18,752 INFO [M:0;ef6f18c58dc9:33543 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:54:18,752 DEBUG [M:0;ef6f18c58dc9:33543 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T01:54:18,752 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
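The RegionServerTracker entries show what finally stops the master: the region server's ephemeral znode under /hbase/rs disappears, its expiration is processed, and with cluster shutdown already set and onlineServers at 0 the master is told to stop. A minimal sketch of that condition is below; it is a simplification of what the log shows, not the ServerManager code.

import java.util.LinkedHashSet;
import java.util.Set;

// Simplified model of the condition visible above: cluster shutdown is set,
// the expired server is removed, and the master stops once no servers remain.
public class ClusterShutdownGate {
    public static void main(String[] args) {
        boolean clusterShutdownSet = true;                 // requested earlier in the log
        Set<String> onlineServers = new LinkedHashSet<>();
        onlineServers.add("ef6f18c58dc9,34803,1733709180806");

        // Ephemeral /hbase/rs node deleted -> expiration processed for that server.
        onlineServers.remove("ef6f18c58dc9,34803,1733709180806");
        System.out.println("onlineServers=" + onlineServers.size());

        if (clusterShutdownSet && onlineServers.isEmpty()) {
            System.out.println("STOPPING master: Cluster shutdown set; onlineServer=0");
        }
    }
}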
2024-12-09T01:54:18,752 DEBUG [M:0;ef6f18c58dc9:33543 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T01:54:18,752 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709180973 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709180973,5,FailOnTimeoutGroup] 2024-12-09T01:54:18,752 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709180974 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709180974,5,FailOnTimeoutGroup] 2024-12-09T01:54:18,752 INFO [M:0;ef6f18c58dc9:33543 {}] hbase.ChoreService(370): Chore service for: master/ef6f18c58dc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T01:54:18,752 INFO [M:0;ef6f18c58dc9:33543 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:54:18,753 DEBUG [M:0;ef6f18c58dc9:33543 {}] master.HMaster(1795): Stopping service threads 2024-12-09T01:54:18,753 INFO [M:0;ef6f18c58dc9:33543 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T01:54:18,753 INFO [M:0;ef6f18c58dc9:33543 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:54:18,753 INFO [M:0;ef6f18c58dc9:33543 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T01:54:18,753 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T01:54:18,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T01:54:18,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:18,754 DEBUG [M:0;ef6f18c58dc9:33543 {}] zookeeper.ZKUtil(347): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T01:54:18,754 WARN [M:0;ef6f18c58dc9:33543 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T01:54:18,754 INFO [M:0;ef6f18c58dc9:33543 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/.lastflushedseqids 2024-12-09T01:54:18,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741878_1054 (size=228) 2024-12-09T01:54:18,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741878_1054 (size=228) 2024-12-09T01:54:18,760 INFO [M:0;ef6f18c58dc9:33543 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T01:54:18,761 INFO [M:0;ef6f18c58dc9:33543 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T01:54:18,761 DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:54:18,761 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:18,761 DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:18,761 DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:54:18,761 DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:18,761 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.68 KB heapSize=65.90 KB 2024-12-09T01:54:18,777 DEBUG [M:0;ef6f18c58dc9:33543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/102a1b44ddba4a238bacb0e66f6f630d is 82, key is hbase:meta,,1/info:regioninfo/1733709181601/Put/seqid=0 2024-12-09T01:54:18,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741879_1055 (size=5672) 2024-12-09T01:54:18,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741879_1055 (size=5672) 2024-12-09T01:54:18,783 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/102a1b44ddba4a238bacb0e66f6f630d 2024-12-09T01:54:18,803 DEBUG [M:0;ef6f18c58dc9:33543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8beebc71eaad4a13b65b7c4ac4923ae3 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733709182009/Put/seqid=0 2024-12-09T01:54:18,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741880_1056 (size=7679) 2024-12-09T01:54:18,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741880_1056 (size=7679) 2024-12-09T01:54:18,808 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.08 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8beebc71eaad4a13b65b7c4ac4923ae3 2024-12-09T01:54:18,812 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8beebc71eaad4a13b65b7c4ac4923ae3 2024-12-09T01:54:18,826 DEBUG [M:0;ef6f18c58dc9:33543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/928a71a1761f4be0a4f76a04686978fc is 69, key is ef6f18c58dc9,34803,1733709180806/rs:state/1733709181051/Put/seqid=0 2024-12-09T01:54:18,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741881_1057 (size=5156) 2024-12-09T01:54:18,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741881_1057 (size=5156) 2024-12-09T01:54:18,831 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/928a71a1761f4be0a4f76a04686978fc 2024-12-09T01:54:18,849 DEBUG [M:0;ef6f18c58dc9:33543 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b92486f6497e41768d9cda579c0d22c4 is 52, key is load_balancer_on/state:d/1733709181640/Put/seqid=0 2024-12-09T01:54:18,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:54:18,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34803-0x100747a68320001, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:54:18,851 INFO [RS:0;ef6f18c58dc9:34803 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:54:18,851 INFO [RS:0;ef6f18c58dc9:34803 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,34803,1733709180806; zookeeper connection closed. 
2024-12-09T01:54:18,851 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@751a40ba {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@751a40ba 2024-12-09T01:54:18,851 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T01:54:18,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741882_1058 (size=5056) 2024-12-09T01:54:18,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741882_1058 (size=5056) 2024-12-09T01:54:18,854 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b92486f6497e41768d9cda579c0d22c4 2024-12-09T01:54:18,860 DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/102a1b44ddba4a238bacb0e66f6f630d as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/102a1b44ddba4a238bacb0e66f6f630d 2024-12-09T01:54:18,864 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/102a1b44ddba4a238bacb0e66f6f630d, entries=8, sequenceid=129, filesize=5.5 K 2024-12-09T01:54:18,865 DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8beebc71eaad4a13b65b7c4ac4923ae3 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8beebc71eaad4a13b65b7c4ac4923ae3 2024-12-09T01:54:18,869 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8beebc71eaad4a13b65b7c4ac4923ae3 2024-12-09T01:54:18,869 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8beebc71eaad4a13b65b7c4ac4923ae3, entries=14, sequenceid=129, filesize=7.5 K 2024-12-09T01:54:18,870 DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/928a71a1761f4be0a4f76a04686978fc as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/928a71a1761f4be0a4f76a04686978fc 2024-12-09T01:54:18,874 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/928a71a1761f4be0a4f76a04686978fc, entries=1, sequenceid=129, filesize=5.0 K 2024-12-09T01:54:18,875 DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/b92486f6497e41768d9cda579c0d22c4 as hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b92486f6497e41768d9cda579c0d22c4 2024-12-09T01:54:18,879 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36289/user/jenkins/test-data/21711d45-16a2-f506-b167-67c92bd2c8f5/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/b92486f6497e41768d9cda579c0d22c4, entries=1, sequenceid=129, filesize=4.9 K 2024-12-09T01:54:18,880 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.68 KB/54973, heapSize ~65.84 KB/67416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=129, compaction requested=false 2024-12-09T01:54:18,881 INFO [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:18,881 DEBUG [M:0;ef6f18c58dc9:33543 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709258761Disabling compacts and flushes for region at 1733709258761Disabling writes for close at 1733709258761Obtaining lock to block concurrent updates at 1733709258761Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733709258761Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=54973, getHeapSize=67416, getOffHeapSize=0, getCellsCount=152 at 1733709258761Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733709258762 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733709258762Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733709258777 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733709258777Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733709258788 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733709258802 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733709258802Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733709258812 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733709258826 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733709258826Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733709258835 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733709258849 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733709258849Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c8e724c: reopening flushed file at 1733709258859 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@584aca9f: reopening flushed file at 1733709258864 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f0bafdf: reopening flushed file at 1733709258869 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f7cde02: reopening flushed file at 1733709258874 (+5 ms)Finished flush of dataSize ~53.68 KB/54973, heapSize ~65.84 KB/67416, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 119ms, sequenceid=129, compaction requested=false at 1733709258880 (+6 ms)Writing region close event to WAL at 1733709258881 (+1 ms)Closed at 1733709258881 2024-12-09T01:54:18,882 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,882 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,882 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,882 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,882 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:18,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42201 is added to blk_1073741830_1006 (size=63903) 2024-12-09T01:54:18,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34061 is added to blk_1073741830_1006 (size=63903) 2024-12-09T01:54:18,885 INFO [M:0;ef6f18c58dc9:33543 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T01:54:18,885 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:54:18,885 INFO [M:0;ef6f18c58dc9:33543 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33543 2024-12-09T01:54:18,885 INFO [M:0;ef6f18c58dc9:33543 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:54:18,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:54:18,987 INFO [M:0;ef6f18c58dc9:33543 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:54:18,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33543-0x100747a68320000, quorum=127.0.0.1:54887, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:54:18,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1d7cc900{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:54:18,990 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20d96a0d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:54:18,990 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:54:18,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@425d5d71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:54:18,990 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@314e7370{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.log.dir/,STOPPED} 2024-12-09T01:54:18,991 WARN [BP-352443598-172.17.0.2-1733709180084 heartbeating to localhost/127.0.0.1:36289 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:54:18,992 WARN [BP-352443598-172.17.0.2-1733709180084 heartbeating to localhost/127.0.0.1:36289 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-352443598-172.17.0.2-1733709180084 (Datanode Uuid 09a3cf37-e459-4f33-ab4e-6bff9d90af3a) service to localhost/127.0.0.1:36289 2024-12-09T01:54:18,992 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:54:18,992 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:54:18,992 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/data/data3/current/BP-352443598-172.17.0.2-1733709180084 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:54:18,992 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/data/data4/current/BP-352443598-172.17.0.2-1733709180084 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:54:18,993 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:54:18,994 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5411f427{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:54:18,995 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10e59f6e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:54:18,995 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:54:18,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50f209dc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:54:18,995 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d639fc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.log.dir/,STOPPED} 2024-12-09T01:54:18,996 WARN [BP-352443598-172.17.0.2-1733709180084 heartbeating to localhost/127.0.0.1:36289 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T01:54:18,996 WARN [BP-352443598-172.17.0.2-1733709180084 heartbeating to localhost/127.0.0.1:36289 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-352443598-172.17.0.2-1733709180084 (Datanode Uuid b8070491-daa1-45cf-b793-5fca58ffabad) service to localhost/127.0.0.1:36289 2024-12-09T01:54:18,996 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T01:54:18,996 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T01:54:18,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/data/data1/current/BP-352443598-172.17.0.2-1733709180084 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:54:18,996 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/cluster_04852131-dbb5-eb0c-0678-4e846323d3fa/data/data2/current/BP-352443598-172.17.0.2-1733709180084 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T01:54:18,996 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T01:54:19,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25570184{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:54:19,003 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@148b0695{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T01:54:19,003 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T01:54:19,003 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@314bb674{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T01:54:19,003 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@240fc28c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.log.dir/,STOPPED} 2024-12-09T01:54:19,010 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T01:54:19,036 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T01:54:19,036 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:19,050 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 208) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36289 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36289 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:36289 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36289 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36289 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36289 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36289 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36289 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36289 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36289 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=509 (was 483) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=124 (was 186), ProcessCount=11 (was 11), AvailableMemoryMB=11516 (was 11627) 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=170, ProcessCount=11, AvailableMemoryMB=11516 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.log.dir so I do NOT create it in target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/ed067f6f-7154-9227-f200-454673d252f7/hadoop.tmp.dir so I do NOT create it in target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7, deleteOnExit=true 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/test.cache.data in system properties and HBase conf 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/hadoop.log.dir in system properties and HBase conf 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T01:54:19,060 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T01:54:19,061 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/nfs.dump.dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/java.io.tmpdir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T01:54:19,061 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T01:54:19,066 INFO [regionserver/ef6f18c58dc9:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:54:19,074 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:54:19,131 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:54:19,135 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:54:19,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:54:19,139 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:54:19,139 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:54:19,140 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:54:19,141 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34f5471c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:54:19,141 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4dfd679e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:54:19,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:54:19,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@25e609c4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/java.io.tmpdir/jetty-localhost-37117-hadoop-hdfs-3_4_1-tests_jar-_-any-18098127196869134113/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T01:54:19,254 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c411d10{HTTP/1.1, (http/1.1)}{localhost:37117} 2024-12-09T01:54:19,254 INFO [Time-limited test {}] server.Server(415): Started @321992ms 2024-12-09T01:54:19,267 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-12-09T01:54:19,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:19,353 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:54:19,358 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:54:19,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:54:19,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:54:19,360 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T01:54:19,364 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186dc28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:54:19,364 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5eeda3c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:54:19,499 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20042785{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/java.io.tmpdir/jetty-localhost-43759-hadoop-hdfs-3_4_1-tests_jar-_-any-8664063606423368170/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:54:19,500 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@79ca80d8{HTTP/1.1, (http/1.1)}{localhost:43759} 2024-12-09T01:54:19,500 INFO [Time-limited test {}] server.Server(415): Started @322237ms 2024-12-09T01:54:19,501 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T01:54:19,530 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T01:54:19,532 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T01:54:19,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T01:54:19,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T01:54:19,533 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T01:54:19,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10e33487{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/hadoop.log.dir/,AVAILABLE} 2024-12-09T01:54:19,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@71465f4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T01:54:19,591 WARN [Thread-2495 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/data/data2/current/BP-375238139-172.17.0.2-1733709259080/current, will proceed with Du for space computation calculation, 2024-12-09T01:54:19,591 WARN [Thread-2494 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/data/data1/current/BP-375238139-172.17.0.2-1733709259080/current, will proceed with Du for space computation calculation, 2024-12-09T01:54:19,613 WARN [Thread-2473 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T01:54:19,615 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x980265224b9f28ea with lease ID 0x4e63cd0f1c905e61: Processing first storage report for DS-b1f81415-a47b-4faf-a0cd-ec030ef3d25e from datanode DatanodeRegistration(127.0.0.1:44165, datanodeUuid=7f5a0ab2-19e4-4897-93a9-a2a92e0a182b, infoPort=33811, infoSecurePort=0, ipcPort=39269, storageInfo=lv=-57;cid=testClusterID;nsid=1111988719;c=1733709259080) 2024-12-09T01:54:19,615 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x980265224b9f28ea with lease ID 0x4e63cd0f1c905e61: from storage DS-b1f81415-a47b-4faf-a0cd-ec030ef3d25e node DatanodeRegistration(127.0.0.1:44165, datanodeUuid=7f5a0ab2-19e4-4897-93a9-a2a92e0a182b, infoPort=33811, infoSecurePort=0, ipcPort=39269, storageInfo=lv=-57;cid=testClusterID;nsid=1111988719;c=1733709259080), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:54:19,616 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x980265224b9f28ea with lease ID 0x4e63cd0f1c905e61: Processing first storage report for DS-1d86c73d-114e-411c-9dec-57a0f06cfd78 from datanode DatanodeRegistration(127.0.0.1:44165, datanodeUuid=7f5a0ab2-19e4-4897-93a9-a2a92e0a182b, infoPort=33811, infoSecurePort=0, ipcPort=39269, storageInfo=lv=-57;cid=testClusterID;nsid=1111988719;c=1733709259080) 2024-12-09T01:54:19,616 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x980265224b9f28ea with lease ID 0x4e63cd0f1c905e61: from storage DS-1d86c73d-114e-411c-9dec-57a0f06cfd78 node DatanodeRegistration(127.0.0.1:44165, datanodeUuid=7f5a0ab2-19e4-4897-93a9-a2a92e0a182b, infoPort=33811, infoSecurePort=0, ipcPort=39269, storageInfo=lv=-57;cid=testClusterID;nsid=1111988719;c=1733709259080), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:54:19,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@e818af2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/java.io.tmpdir/jetty-localhost-37363-hadoop-hdfs-3_4_1-tests_jar-_-any-16655989807060264199/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T01:54:19,653 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@87b2e2b{HTTP/1.1, (http/1.1)}{localhost:37363} 2024-12-09T01:54:19,653 INFO [Time-limited test {}] server.Server(415): Started @322390ms 2024-12-09T01:54:19,654 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T01:54:19,745 WARN [Thread-2520 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/data/data3/current/BP-375238139-172.17.0.2-1733709259080/current, will proceed with Du for space computation calculation, 2024-12-09T01:54:19,745 WARN [Thread-2521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/data/data4/current/BP-375238139-172.17.0.2-1733709259080/current, will proceed with Du for space computation calculation, 2024-12-09T01:54:19,762 WARN [Thread-2509 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T01:54:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xedb7347c80627c8d with lease ID 0x4e63cd0f1c905e62: Processing first storage report for DS-22803aba-c2c5-4386-8024-193a1aee95f3 from datanode DatanodeRegistration(127.0.0.1:38549, datanodeUuid=177f7649-7d8d-4582-99eb-9f5047aeff42, infoPort=42589, infoSecurePort=0, ipcPort=37601, storageInfo=lv=-57;cid=testClusterID;nsid=1111988719;c=1733709259080) 2024-12-09T01:54:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xedb7347c80627c8d with lease ID 0x4e63cd0f1c905e62: from storage DS-22803aba-c2c5-4386-8024-193a1aee95f3 node DatanodeRegistration(127.0.0.1:38549, datanodeUuid=177f7649-7d8d-4582-99eb-9f5047aeff42, infoPort=42589, infoSecurePort=0, ipcPort=37601, storageInfo=lv=-57;cid=testClusterID;nsid=1111988719;c=1733709259080), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:54:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xedb7347c80627c8d with lease ID 0x4e63cd0f1c905e62: Processing first storage report for DS-4c95c6c2-2f77-4b53-bd40-cabbfcd96468 from datanode DatanodeRegistration(127.0.0.1:38549, datanodeUuid=177f7649-7d8d-4582-99eb-9f5047aeff42, infoPort=42589, infoSecurePort=0, ipcPort=37601, storageInfo=lv=-57;cid=testClusterID;nsid=1111988719;c=1733709259080) 2024-12-09T01:54:19,764 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xedb7347c80627c8d with lease ID 0x4e63cd0f1c905e62: from storage DS-4c95c6c2-2f77-4b53-bd40-cabbfcd96468 node DatanodeRegistration(127.0.0.1:38549, datanodeUuid=177f7649-7d8d-4582-99eb-9f5047aeff42, infoPort=42589, infoSecurePort=0, ipcPort=37601, storageInfo=lv=-57;cid=testClusterID;nsid=1111988719;c=1733709259080), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T01:54:19,777 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16 2024-12-09T01:54:19,780 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/zookeeper_0, clientPort=57283, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T01:54:19,781 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57283 2024-12-09T01:54:19,782 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:54:19,783 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:54:19,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:54:19,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741825_1001 (size=7) 2024-12-09T01:54:19,793 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3 with version=8 2024-12-09T01:54:19,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:36525/user/jenkins/test-data/2ece4ae0-2117-8925-7c38-0678942d59d1/hbase-staging 2024-12-09T01:54:19,795 INFO [Time-limited test {}] client.ConnectionUtils(128): master/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:54:19,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:54:19,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:54:19,795 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:54:19,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:54:19,795 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:54:19,795 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T01:54:19,795 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:54:19,796 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33525 2024-12-09T01:54:19,797 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33525 connecting to ZooKeeper ensemble=127.0.0.1:57283 2024-12-09T01:54:19,803 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:335250x0, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:54:19,804 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33525-0x100747b9cea0000 connected 2024-12-09T01:54:19,824 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:54:19,826 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:54:19,827 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:54:19,828 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3, hbase.cluster.distributed=false 2024-12-09T01:54:19,829 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:54:19,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33525 2024-12-09T01:54:19,831 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33525 2024-12-09T01:54:19,832 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33525 2024-12-09T01:54:19,834 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33525 2024-12-09T01:54:19,836 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33525 2024-12-09T01:54:19,851 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/ef6f18c58dc9:0 server-side Connection retries=45 2024-12-09T01:54:19,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:54:19,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T01:54:19,851 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T01:54:19,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T01:54:19,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T01:54:19,851 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T01:54:19,851 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T01:54:19,852 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34649 2024-12-09T01:54:19,853 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34649 connecting to ZooKeeper ensemble=127.0.0.1:57283 2024-12-09T01:54:19,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:54:19,856 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:54:19,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:346490x0, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T01:54:19,860 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34649-0x100747b9cea0001 connected 2024-12-09T01:54:19,860 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:54:19,861 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T01:54:19,863 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T01:54:19,863 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T01:54:19,864 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T01:54:19,866 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34649 2024-12-09T01:54:19,867 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34649 2024-12-09T01:54:19,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34649 2024-12-09T01:54:19,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34649 2024-12-09T01:54:19,872 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34649 
2024-12-09T01:54:19,891 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;ef6f18c58dc9:33525 2024-12-09T01:54:19,891 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/ef6f18c58dc9,33525,1733709259795 2024-12-09T01:54:19,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:54:19,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:54:19,894 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/ef6f18c58dc9,33525,1733709259795 2024-12-09T01:54:19,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T01:54:19,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:19,896 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:19,896 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T01:54:19,897 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/ef6f18c58dc9,33525,1733709259795 from backup master directory 2024-12-09T01:54:19,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/ef6f18c58dc9,33525,1733709259795 2024-12-09T01:54:19,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:54:19,898 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T01:54:19,898 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T01:54:19,898 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=ef6f18c58dc9,33525,1733709259795 2024-12-09T01:54:19,902 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/hbase.id] with ID: f9b2a113-3783-4712-a5e4-f32305416998 2024-12-09T01:54:19,902 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/.tmp/hbase.id 2024-12-09T01:54:19,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:54:19,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741826_1002 (size=42) 2024-12-09T01:54:19,908 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/.tmp/hbase.id]:[hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/hbase.id] 2024-12-09T01:54:19,918 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:54:19,919 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T01:54:19,920 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-12-09T01:54:19,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:19,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:19,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:54:19,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741827_1003 (size=196) 2024-12-09T01:54:19,929 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T01:54:19,930 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T01:54:19,930 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:54:19,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:54:19,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741828_1004 (size=1189) 2024-12-09T01:54:19,938 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store 2024-12-09T01:54:19,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:54:19,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741829_1005 (size=34) 2024-12-09T01:54:19,946 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:54:19,946 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:54:19,946 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:19,946 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:19,947 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:54:19,947 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:19,947 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T01:54:19,947 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709259946Disabling compacts and flushes for region at 1733709259946Disabling writes for close at 1733709259947 (+1 ms)Writing region close event to WAL at 1733709259947Closed at 1733709259947 2024-12-09T01:54:19,948 WARN [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/.initializing 2024-12-09T01:54:19,948 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/WALs/ef6f18c58dc9,33525,1733709259795 2024-12-09T01:54:19,950 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C33525%2C1733709259795, suffix=, logDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/WALs/ef6f18c58dc9,33525,1733709259795, archiveDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/oldWALs, maxLogs=10 2024-12-09T01:54:19,951 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C33525%2C1733709259795.1733709259950 2024-12-09T01:54:19,956 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/WALs/ef6f18c58dc9,33525,1733709259795/ef6f18c58dc9%2C33525%2C1733709259795.1733709259950 2024-12-09T01:54:19,960 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42589:42589),(127.0.0.1/127.0.0.1:33811:33811)] 2024-12-09T01:54:19,963 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:54:19,963 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:54:19,963 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,963 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,967 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,969 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T01:54:19,969 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:19,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:54:19,970 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T01:54:19,971 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:19,971 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:54:19,972 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,973 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T01:54:19,973 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:19,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:54:19,974 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T01:54:19,975 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:19,975 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T01:54:19,976 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,976 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,977 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,978 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,978 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,978 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T01:54:19,980 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T01:54:19,982 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:54:19,982 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812409, jitterRate=0.033031463623046875}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T01:54:19,983 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733709259963Initializing all the Stores at 1733709259964 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709259964Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709259967 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709259967Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709259967Cleaning up temporary data from old regions at 1733709259978 (+11 ms)Region opened successfully at 1733709259983 (+5 ms) 2024-12-09T01:54:19,984 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T01:54:19,987 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7832e091, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:54:19,988 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T01:54:19,988 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T01:54:19,988 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T01:54:19,988 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T01:54:19,989 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T01:54:19,989 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T01:54:19,989 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T01:54:19,993 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T01:54:19,994 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T01:54:19,996 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T01:54:19,996 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T01:54:19,997 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T01:54:19,999 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T01:54:19,999 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T01:54:20,000 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T01:54:20,001 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T01:54:20,002 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T01:54:20,004 DEBUG 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T01:54:20,005 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T01:54:20,007 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T01:54:20,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:54:20,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T01:54:20,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,010 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=ef6f18c58dc9,33525,1733709259795, sessionid=0x100747b9cea0000, setting cluster-up flag (Was=false) 2024-12-09T01:54:20,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,019 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T01:54:20,020 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,33525,1733709259795 2024-12-09T01:54:20,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,030 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T01:54:20,031 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=ef6f18c58dc9,33525,1733709259795 2024-12-09T01:54:20,032 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T01:54:20,034 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T01:54:20,034 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T01:54:20,034 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-09T01:54:20,034 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: ef6f18c58dc9,33525,1733709259795 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T01:54:20,035 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:54:20,035 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:54:20,035 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:54:20,036 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=5, maxPoolSize=5 2024-12-09T01:54:20,036 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/ef6f18c58dc9:0, corePoolSize=10, maxPoolSize=10 2024-12-09T01:54:20,036 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,036 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:54:20,036 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/ef6f18c58dc9:0, corePoolSize=1, 
maxPoolSize=1 2024-12-09T01:54:20,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-12-09T01:54:20,037 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:54:20,038 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T01:54:20,039 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:20,039 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T01:54:20,045 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733709290045 2024-12-09T01:54:20,045 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T01:54:20,045 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T01:54:20,046 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T01:54:20,046 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T01:54:20,046 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T01:54:20,046 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T01:54:20,046 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,046 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T01:54:20,046 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T01:54:20,047 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T01:54:20,047 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T01:54:20,047 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T01:54:20,047 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709260047,5,FailOnTimeoutGroup] 2024-12-09T01:54:20,047 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709260047,5,FailOnTimeoutGroup] 2024-12-09T01:54:20,047 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,048 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T01:54:20,048 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,048 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
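The hbase:meta descriptor logged above, with its info/ns/rep_barrier/table column families, is assembled internally by FSTableDescriptors. A minimal sketch of how an equivalent column family and table descriptor could be expressed with the public client builders; the table name is illustrative and only the 'info' family attributes from the log are reproduced, so this is not the code HBase actually runs here:

    // Sketch only: mirrors the 'info' family attributes from the log (3 versions, ROWCOL bloom,
    // ROW_INDEX_V1 encoding, in-memory, 8 KB blocks) using the public builder API.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_meta_like")) // illustrative name, not hbase:meta
            .setColumnFamily(info)
            .build();
      }
    }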
2024-12-09T01:54:20,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:54:20,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741831_1007 (size=1321) 2024-12-09T01:54:20,054 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T01:54:20,054 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3 2024-12-09T01:54:20,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:54:20,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741832_1008 (size=32) 2024-12-09T01:54:20,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:54:20,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:54:20,069 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:54:20,069 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:20,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:54:20,070 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:54:20,071 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:54:20,071 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:20,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:54:20,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:54:20,073 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:54:20,073 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:20,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:54:20,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:54:20,074 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:54:20,074 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:20,075 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(746): ClusterId : f9b2a113-3783-4712-a5e4-f32305416998 2024-12-09T01:54:20,075 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T01:54:20,075 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:54:20,075 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:54:20,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740 2024-12-09T01:54:20,076 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740 2024-12-09T01:54:20,077 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T01:54:20,077 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T01:54:20,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:54:20,077 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:54:20,078 DEBUG 
[PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-09T01:54:20,079 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:54:20,079 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T01:54:20,080 DEBUG [RS:0;ef6f18c58dc9:34649 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5840ae2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=ef6f18c58dc9/172.17.0.2:0 2024-12-09T01:54:20,081 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T01:54:20,082 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821023, jitterRate=0.043985962867736816}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:54:20,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733709260065Initializing all the Stores at 1733709260066 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709260066Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709260068 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709260068Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709260068Cleaning up temporary data from old regions at 1733709260077 (+9 ms)Region opened successfully at 1733709260082 (+5 ms) 2024-12-09T01:54:20,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:54:20,082 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:54:20,082 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:54:20,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:54:20,082 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:54:20,083 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:54:20,083 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709260082Disabling compacts and flushes for region at 1733709260082Disabling writes for close at 1733709260082Writing region close event to WAL at 1733709260083 (+1 ms)Closed at 1733709260083 2024-12-09T01:54:20,084 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:54:20,084 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T01:54:20,085 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T01:54:20,086 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:54:20,087 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T01:54:20,097 DEBUG [RS:0;ef6f18c58dc9:34649 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;ef6f18c58dc9:34649 2024-12-09T01:54:20,097 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T01:54:20,097 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T01:54:20,097 DEBUG [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T01:54:20,098 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(2659): reportForDuty to master=ef6f18c58dc9,33525,1733709259795 with port=34649, startcode=1733709259851 2024-12-09T01:54:20,098 DEBUG [RS:0;ef6f18c58dc9:34649 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T01:54:20,104 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37637, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T01:54:20,105 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33525 {}] master.ServerManager(363): Checking decommissioned status of RegionServer ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,105 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33525 {}] master.ServerManager(517): Registering regionserver=ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,107 DEBUG [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3 2024-12-09T01:54:20,107 DEBUG [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45481 2024-12-09T01:54:20,107 DEBUG [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T01:54:20,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:54:20,110 DEBUG [RS:0;ef6f18c58dc9:34649 {}] zookeeper.ZKUtil(111): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,110 WARN [RS:0;ef6f18c58dc9:34649 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T01:54:20,110 INFO [RS:0;ef6f18c58dc9:34649 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:54:20,110 DEBUG [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,111 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [ef6f18c58dc9,34649,1733709259851] 2024-12-09T01:54:20,114 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T01:54:20,115 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T01:54:20,116 INFO [RS:0;ef6f18c58dc9:34649 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T01:54:20,116 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-09T01:54:20,116 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T01:54:20,117 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T01:54:20,117 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=2, maxPoolSize=2 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/ef6f18c58dc9:0, corePoolSize=1, maxPoolSize=1 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:54:20,117 DEBUG [RS:0;ef6f18c58dc9:34649 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/ef6f18c58dc9:0, corePoolSize=3, maxPoolSize=3 2024-12-09T01:54:20,118 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
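The CompactionChecker, MemstoreFlusherChore and the cleaner chores enabled in the surrounding lines are all instances of HBase's ScheduledChore driven by a ChoreService. A minimal sketch, assuming those public classes and a made-up periodic task, of how such a chore is defined and scheduled:

    // Sketch only: a periodic task in the style of the chores logged above. The task body
    // and names are hypothetical; real cleaners walk old-WAL/HFile directories instead.
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ExampleCleanerChore extends ScheduledChore {

      ExampleCleanerChore(Stoppable stopper, int periodMillis) {
        super("ExampleCleanerChore", stopper, periodMillis);
      }

      @Override
      protected void chore() {
        // Runs once per period on the ChoreService's scheduler thread.
        System.out.println("cleaner pass at " + System.currentTimeMillis());
      }

      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("example");
        service.scheduleChore(new ExampleCleanerChore(stopper, 1000));
        Thread.sleep(3000);
        service.shutdown();
      }
    }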
2024-12-09T01:54:20,118 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,118 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,118 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,118 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,118 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34649,1733709259851-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:54:20,141 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T01:54:20,141 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,34649,1733709259851-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,141 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,141 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.Replication(171): ef6f18c58dc9,34649,1733709259851 started 2024-12-09T01:54:20,155 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,155 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(1482): Serving as ef6f18c58dc9,34649,1733709259851, RpcServer on ef6f18c58dc9/172.17.0.2:34649, sessionid=0x100747b9cea0001 2024-12-09T01:54:20,155 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T01:54:20,155 DEBUG [RS:0;ef6f18c58dc9:34649 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,155 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,34649,1733709259851' 2024-12-09T01:54:20,155 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T01:54:20,156 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T01:54:20,157 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T01:54:20,157 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T01:54:20,157 DEBUG [RS:0;ef6f18c58dc9:34649 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,157 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'ef6f18c58dc9,34649,1733709259851' 2024-12-09T01:54:20,157 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T01:54:20,157 DEBUG 
[RS:0;ef6f18c58dc9:34649 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T01:54:20,157 DEBUG [RS:0;ef6f18c58dc9:34649 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T01:54:20,157 INFO [RS:0;ef6f18c58dc9:34649 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T01:54:20,157 INFO [RS:0;ef6f18c58dc9:34649 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T01:54:20,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T01:54:20,237 WARN [ef6f18c58dc9:33525 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions.
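The recurring "Failed invocation ... Filesystem closed" WARNs around here come from the WAL close path trying to recover the HDFS lease on WAL files that apparently belong to an earlier, already torn-down mini-cluster (note the old namenode port 42375 versus the current 45481); the reflective isFileClosed call fails because that DFS client has been shut down. A minimal sketch, assuming only the public DistributedFileSystem API, of the recover-then-poll pattern that RecoverLeaseFSUtils wraps:

    // Sketch of the lease-recovery pattern behind the WARNs above: ask the NameNode to recover
    // the lease, then poll isFileClosed() until the file is closed. Assumes an open
    // DistributedFileSystem; the "Filesystem closed" IOException in the log happens when this
    // runs after the client has already been shut down.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      static void recoverLease(DistributedFileSystem dfs, Path wal) throws Exception {
        boolean recovered = dfs.recoverLease(wal);   // triggers lease recovery on the NameNode
        while (!recovered) {
          Thread.sleep(1000);                        // back off between checks
          recovered = dfs.isFileClosed(wal);         // the call seen in the stack traces
        }
      }
    }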
2024-12-09T01:54:20,259 INFO [RS:0;ef6f18c58dc9:34649 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C34649%2C1733709259851, suffix=, logDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/ef6f18c58dc9,34649,1733709259851, archiveDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/oldWALs, maxLogs=32 2024-12-09T01:54:20,260 INFO [RS:0;ef6f18c58dc9:34649 {}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C34649%2C1733709259851.1733709260260 2024-12-09T01:54:20,265 INFO [RS:0;ef6f18c58dc9:34649 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/ef6f18c58dc9,34649,1733709259851/ef6f18c58dc9%2C34649%2C1733709259851.1733709260260 2024-12-09T01:54:20,266 DEBUG [RS:0;ef6f18c58dc9:34649 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33811:33811),(127.0.0.1/127.0.0.1:42589:42589)] 2024-12-09T01:54:20,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-12-09T01:54:20,487 DEBUG [ef6f18c58dc9:33525 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-09T01:54:20,488 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,489 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,34649,1733709259851, state=OPENING 2024-12-09T01:54:20,491 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T01:54:20,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,493 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T01:54:20,493 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:54:20,493 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:54:20,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,34649,1733709259851}] 2024-12-09T01:54:20,646 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T01:54:20,648 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46803, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T01:54:20,652 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T01:54:20,652 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:54:20,654 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=ef6f18c58dc9%2C34649%2C1733709259851.meta, suffix=.meta, logDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/ef6f18c58dc9,34649,1733709259851, archiveDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/oldWALs, maxLogs=32 2024-12-09T01:54:20,654 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor ef6f18c58dc9%2C34649%2C1733709259851.meta.1733709260654.meta 2024-12-09T01:54:20,659 INFO 
[RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/ef6f18c58dc9,34649,1733709259851/ef6f18c58dc9%2C34649%2C1733709259851.meta.1733709260654.meta 2024-12-09T01:54:20,662 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33811:33811),(127.0.0.1/127.0.0.1:42589:42589)] 2024-12-09T01:54:20,664 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T01:54:20,665 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T01:54:20,665 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T01:54:20,665 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-09T01:54:20,665 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T01:54:20,665 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T01:54:20,665 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T01:54:20,665 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T01:54:20,668 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T01:54:20,669 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T01:54:20,669 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:20,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:54:20,669 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T01:54:20,670 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T01:54:20,670 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:20,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:54:20,670 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T01:54:20,671 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T01:54:20,671 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:20,671 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:54:20,672 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T01:54:20,672 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T01:54:20,672 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T01:54:20,673 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T01:54:20,673 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T01:54:20,674 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740 2024-12-09T01:54:20,675 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740 2024-12-09T01:54:20,676 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T01:54:20,676 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T01:54:20,676 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-09T01:54:20,677 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T01:54:20,678 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=838616, jitterRate=0.06635552644729614}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-09T01:54:20,678 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T01:54:20,678 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733709260665Writing region info on filesystem at 1733709260665Initializing all the Stores at 1733709260666 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709260666Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709260668 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733709260668Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733709260668Cleaning up temporary data from old regions at 1733709260676 (+8 ms)Running coprocessor post-open hooks at 1733709260678 (+2 ms)Region opened successfully at 1733709260678 2024-12-09T01:54:20,679 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733709260646 2024-12-09T01:54:20,682 DEBUG [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T01:54:20,682 INFO [RS_OPEN_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T01:54:20,682 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,683 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as ef6f18c58dc9,34649,1733709259851, state=OPEN 2024-12-09T01:54:20,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:54:20,688 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T01:54:20,688 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:54:20,688 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T01:54:20,688 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T01:54:20,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=ef6f18c58dc9,34649,1733709259851 in 195 msec 2024-12-09T01:54:20,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T01:54:20,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 607 msec 2024-12-09T01:54:20,693 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T01:54:20,693 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T01:54:20,695 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:54:20,695 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,34649,1733709259851, seqNum=-1] 2024-12-09T01:54:20,695 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:54:20,696 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55767, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:54:20,701 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 667 msec 2024-12-09T01:54:20,701 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733709260701, completionTime=-1 2024-12-09T01:54:20,701 INFO 
[master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-09T01:54:20,701 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-09T01:54:20,703 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-12-09T01:54:20,703 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733709320703 2024-12-09T01:54:20,703 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733709380703 2024-12-09T01:54:20,703 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T01:54:20,704 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33525,1733709259795-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,704 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33525,1733709259795-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,704 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33525,1733709259795-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,704 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-ef6f18c58dc9:33525, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,704 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,704 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,705 DEBUG [master/ef6f18c58dc9:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T01:54:20,708 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.810sec 2024-12-09T01:54:20,708 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T01:54:20,708 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T01:54:20,708 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T01:54:20,708 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
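Once the master reports that initialization is complete, a client can connect through the connection registry exactly as the Time-limited test thread does in the following lines (cluster id fetch, registry stubs, meta region location, then ClientService calls). A minimal sketch of that client-side flow using the standard Connection API; the table and column family names are hypothetical:

    // Sketch only: connect to a running cluster, write one cell, read it back. Configuration
    // is picked up from hbase-site.xml / the ZooKeeper quorum on the classpath.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ClientSmokeTest {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("example"))) { // hypothetical table
          Put put = new Put(Bytes.toBytes("row1"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("v"));
          table.put(put);
          Result r = table.get(new Get(Bytes.toBytes("row1")));
          System.out.println(Bytes.toString(r.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"))));
        }
      }
    }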
2024-12-09T01:54:20,708 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T01:54:20,708 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33525,1733709259795-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T01:54:20,708 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33525,1733709259795-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T01:54:20,710 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T01:54:20,710 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T01:54:20,711 INFO [master/ef6f18c58dc9:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ef6f18c58dc9,33525,1733709259795-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T01:54:20,778 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2269c58e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:54:20,778 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request ef6f18c58dc9,33525,-1 for getting cluster id 2024-12-09T01:54:20,778 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T01:54:20,780 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'f9b2a113-3783-4712-a5e4-f32305416998' 2024-12-09T01:54:20,780 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T01:54:20,780 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "f9b2a113-3783-4712-a5e4-f32305416998" 2024-12-09T01:54:20,780 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@255c3c6d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:54:20,780 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [ef6f18c58dc9,33525,-1] 2024-12-09T01:54:20,781 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T01:54:20,781 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:20,782 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35662, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T01:54:20,783 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e6dd92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T01:54:20,783 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T01:54:20,784 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=ef6f18c58dc9,34649,1733709259851, seqNum=-1] 2024-12-09T01:54:20,784 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T01:54:20,785 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37616, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T01:54:20,787 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=ef6f18c58dc9,33525,1733709259795 2024-12-09T01:54:20,787 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T01:54:20,789 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-12-09T01:54:20,789 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T01:54:20,791 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/test.com,8080,1, archiveDir=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/oldWALs, maxLogs=32 2024-12-09T01:54:20,791 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733709260791 2024-12-09T01:54:20,796 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/test.com,8080,1/test.com%2C8080%2C1.1733709260791 2024-12-09T01:54:20,797 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33811:33811),(127.0.0.1/127.0.0.1:42589:42589)] 2024-12-09T01:54:20,797 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1733709260797 2024-12-09T01:54:20,802 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,802 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,802 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,802 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,802 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,802 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/test.com,8080,1/test.com%2C8080%2C1.1733709260791 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/test.com,8080,1/test.com%2C8080%2C1.1733709260797 2024-12-09T01:54:20,804 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33811:33811),(127.0.0.1/127.0.0.1:42589:42589)] 2024-12-09T01:54:20,804 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/test.com,8080,1/test.com%2C8080%2C1.1733709260791 is not closed yet, will try archiving it next time 2024-12-09T01:54:20,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741835_1011 (size=93) 2024-12-09T01:54:20,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741835_1011 (size=93) 2024-12-09T01:54:20,805 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,805 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,805 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,805 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:20,806 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/WALs/test.com,8080,1/test.com%2C8080%2C1.1733709260791 to hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/oldWALs/test.com%2C8080%2C1.1733709260791 2024-12-09T01:54:20,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741836_1012 (size=93) 2024-12-09T01:54:20,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741836_1012 (size=93) 2024-12-09T01:54:20,809 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/oldWALs 2024-12-09T01:54:20,809 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1733709260797) 2024-12-09T01:54:20,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T01:54:20,809 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
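The entries above show a WAL being created, rolled with zero entries, archived to oldWALs and finally closed. A simplified local-filesystem sketch of that roll-and-archive sequence follows, using plain java.nio.file rather than the FSHLog/HDFS machinery the test exercises; all paths and names are hypothetical.

import java.io.IOException;
import java.nio.file.*;

public class LogRollSketch {
  // Stand-in for the roll seen above: start a new timestamped segment,
  // then move the previous segment into an archive ("oldWALs") directory.
  static Path roll(Path walDir, Path archiveDir, Path current) throws IOException {
    Path next = walDir.resolve("wal." + System.currentTimeMillis());
    Files.createDirectories(walDir);
    Files.createDirectories(archiveDir);
    Files.createFile(next);                                   // new active segment
    if (current != null && Files.exists(current)) {
      Files.move(current, archiveDir.resolve(current.getFileName()),
          StandardCopyOption.REPLACE_EXISTING);               // archive the old segment
    }
    return next;
  }

  public static void main(String[] args) throws IOException {
    Path walDir = Paths.get("/tmp/wal-sketch/WALs");
    Path oldWals = Paths.get("/tmp/wal-sketch/oldWALs");
    Path first = roll(walDir, oldWals, null);
    Path second = roll(walDir, oldWals, first);               // "Rolled WAL ... with entries=0"
    System.out.println("active=" + second);
  }
}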
2024-12-09T01:54:20,809 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:54:20,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:20,809 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:20,810 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
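The call stack logged above shows where this connection close originates: the JUnit tearDown of AbstractTestLogRolling invoking HBaseTestingUtil.shutdownMiniCluster. A hedged sketch of that test lifecycle follows; apart from shutdownMiniCluster, which appears in the stack, the HBaseTestingUtil constructor and method names are assumptions based on the conventional HBase testing-utility API.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  // Assumed no-arg constructor, following the usual testing-utility convention.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up the in-process master, region server, ZooKeeper and HDFS,
    // producing startup logs like those earlier in this file. (Assumed method name.)
    testUtil.startMiniCluster();
  }

  @Test
  public void doesNothing() {
    // test body elided; the log above comes from the cluster lifecycle itself
  }

  @After
  public void tearDown() throws Exception {
    // Matches the call stack logged above:
    // AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster
    testUtil.shutdownMiniCluster();
  }
}

The "Minicluster is up" and "Shutting down minicluster" entries earlier in this section bracket exactly this setUp/tearDown pair.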
2024-12-09T01:54:20,810 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T01:54:20,810 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1672285668, stopped=false 2024-12-09T01:54:20,810 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=ef6f18c58dc9,33525,1733709259795 2024-12-09T01:54:20,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:54:20,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T01:54:20,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,812 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:54:20,812 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:20,812 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T01:54:20,812 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:54:20,812 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:20,812 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'ef6f18c58dc9,34649,1733709259851' ***** 2024-12-09T01:54:20,812 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T01:54:20,812 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T01:54:20,813 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(959): stopping server ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;ef6f18c58dc9:34649. 
2024-12-09T01:54:20,813 DEBUG [RS:0;ef6f18c58dc9:34649 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T01:54:20,813 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:54:20,813 DEBUG [RS:0;ef6f18c58dc9:34649 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
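The "Waiting for Split Thread / Compaction Thread to finish" messages above reflect the usual drain-then-stop ordering for worker pools. A generic JDK sketch of that ordering (not the CompactSplit implementation itself), with an assumed 30-second grace period:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class GracefulPoolShutdownSketch {
  // Stop accepting new work, wait for in-flight tasks, then interrupt stragglers --
  // the same ordering the region server applies to its split/compaction pools.
  static void stop(ExecutorService pool, String name) throws InterruptedException {
    pool.shutdown();                                     // no new tasks
    System.out.println("Waiting for " + name + " to finish...");
    if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {  // bounded wait
      pool.shutdownNow();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ExecutorService splits = Executors.newFixedThreadPool(1);
    ExecutorService compactions = Executors.newFixedThreadPool(2);
    stop(splits, "Split Thread");
    stop(compactions, "Compaction Threads");
  }
}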
2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T01:54:20,813 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T01:54:20,813 DEBUG [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T01:54:20,813 DEBUG [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T01:54:20,813 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T01:54:20,813 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T01:54:20,814 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T01:54:20,814 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T01:54:20,814 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T01:54:20,814 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-12-09T01:54:20,816 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T01:54:20,829 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740/.tmp/ns/deb001a971254bfcb0dce1527152f02a is 43, key is default/ns:d/1733709260697/Put/seqid=0 2024-12-09T01:54:20,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741837_1013 (size=5153) 2024-12-09T01:54:20,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741837_1013 (size=5153) 2024-12-09T01:54:20,843 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740/.tmp/ns/deb001a971254bfcb0dce1527152f02a 2024-12-09T01:54:20,849 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740/.tmp/ns/deb001a971254bfcb0dce1527152f02a as hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740/ns/deb001a971254bfcb0dce1527152f02a 2024-12-09T01:54:20,854 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740/ns/deb001a971254bfcb0dce1527152f02a, entries=2, sequenceid=6, filesize=5.0 K 2024-12-09T01:54:20,855 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=6, compaction requested=false 2024-12-09T01:54:20,855 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T01:54:20,860 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-09T01:54:20,861 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T01:54:20,861 INFO [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T01:54:20,861 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733709260813Running coprocessor pre-close hooks at 1733709260813Disabling compacts and flushes for region at 1733709260813Disabling writes for close at 1733709260814 (+1 ms)Obtaining lock to block concurrent updates at 1733709260814Preparing flush snapshotting stores in 1588230740 at 1733709260814Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1733709260814Flushing stores of hbase:meta,,1.1588230740 at 1733709260815 (+1 ms)Flushing 1588230740/ns: creating writer at 1733709260815Flushing 1588230740/ns: appending metadata at 1733709260829 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733709260829Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27f241e: reopening flushed file at 1733709260849 (+20 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 41ms, sequenceid=6, compaction requested=false at 1733709260855 (+6 ms)Writing region close event to WAL at 1733709260857 (+2 ms)Running coprocessor post-close hooks at 1733709260861 (+4 ms)Closed at 1733709260861 2024-12-09T01:54:20,861 DEBUG [RS_CLOSE_META-regionserver/ef6f18c58dc9:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T01:54:21,014 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(976): stopping server ef6f18c58dc9,34649,1733709259851; all regions closed. 
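The flush above writes the memstore contents to a file under the region's .tmp directory and then commits it into the ns store by rename ("Committing .tmp/ns/... as .../ns/..."). Below is a minimal local-filesystem sketch of that write-to-temp-then-publish pattern, standing in for HRegionFileSystem, which performs the same step against HDFS; directories and payload are illustrative.

import java.io.IOException;
import java.nio.file.*;

public class FlushCommitSketch {
  // Write the flushed data under .tmp first, then publish it with a rename,
  // mirroring the "Committing .tmp/ns/<file> as ns/<file>" step logged above.
  static Path commitFlush(Path regionDir, String family, String fileName, byte[] data)
      throws IOException {
    Path tmp = regionDir.resolve(".tmp").resolve(family).resolve(fileName);
    Path dst = regionDir.resolve(family).resolve(fileName);
    Files.createDirectories(tmp.getParent());
    Files.createDirectories(dst.getParent());
    Files.write(tmp, data);                                       // flush to the temp location
    return Files.move(tmp, dst, StandardCopyOption.ATOMIC_MOVE);  // single-step commit
  }

  public static void main(String[] args) throws IOException {
    Path region = Paths.get("/tmp/flush-sketch/1588230740");
    Path committed = commitFlush(region, "ns", "deb001a971254bfcb0dce1527152f02a",
        "default,hbase".getBytes());
    System.out.println("committed " + committed);
  }
}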
2024-12-09T01:54:21,014 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,014 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,014 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,014 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,014 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741834_1010 (size=1152) 2024-12-09T01:54:21,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741834_1010 (size=1152) 2024-12-09T01:54:21,019 DEBUG [RS:0;ef6f18c58dc9:34649 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/oldWALs 2024-12-09T01:54:21,019 INFO [RS:0;ef6f18c58dc9:34649 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C34649%2C1733709259851.meta:.meta(num 1733709260654) 2024-12-09T01:54:21,019 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,019 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,019 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,020 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,020 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741833_1009 (size=93) 2024-12-09T01:54:21,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741833_1009 (size=93) 2024-12-09T01:54:21,023 DEBUG [RS:0;ef6f18c58dc9:34649 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/oldWALs 2024-12-09T01:54:21,023 INFO [RS:0;ef6f18c58dc9:34649 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog ef6f18c58dc9%2C34649%2C1733709259851:(num 1733709260260) 2024-12-09T01:54:21,023 DEBUG [RS:0;ef6f18c58dc9:34649 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T01:54:21,023 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T01:54:21,023 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:54:21,024 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.ChoreService(370): Chore service for: regionserver/ef6f18c58dc9:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T01:54:21,024 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:54:21,024 INFO [regionserver/ef6f18c58dc9:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T01:54:21,024 INFO [RS:0;ef6f18c58dc9:34649 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34649 2024-12-09T01:54:21,026 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:54:21,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T01:54:21,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/ef6f18c58dc9,34649,1733709259851 2024-12-09T01:54:21,028 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [ef6f18c58dc9,34649,1733709259851] 2024-12-09T01:54:21,030 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/ef6f18c58dc9,34649,1733709259851 already deleted, retry=false 2024-12-09T01:54:21,030 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; ef6f18c58dc9,34649,1733709259851 expired; onlineServers=0 2024-12-09T01:54:21,030 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'ef6f18c58dc9,33525,1733709259795' ***** 2024-12-09T01:54:21,030 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T01:54:21,030 INFO [M:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T01:54:21,030 INFO [M:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T01:54:21,030 DEBUG [M:0;ef6f18c58dc9:33525 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T01:54:21,030 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
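The expiration processing above is driven by ZooKeeper ephemeral nodes: each region server holds an ephemeral znode (here under /hbase/rs), and when its session ends the node disappears and watchers receive NodeDeleted, exactly as logged. A minimal sketch of that liveness pattern with the plain ZooKeeper client, assuming a reachable ZooKeeper at 127.0.0.1:2181 and using an illustrative single-level znode path rather than HBase's layout:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralLivenessSketch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:2181";        // assumption: a local ZooKeeper to talk to
    String znode = "/ephemeral-demo-rs1";    // illustrative path, not /hbase/rs/...

    ZooKeeper server = new ZooKeeper(quorum, 30_000, event -> { });
    ZooKeeper observer = new ZooKeeper(quorum, 30_000, event -> { });

    // The "region server" registers itself with an ephemeral node ...
    server.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // ... and an observer (the master's tracker, in HBase's case) watches for its deletion.
    CountDownLatch deleted = new CountDownLatch(1);
    observer.exists(znode, event -> {
      if (event.getType() == EventType.NodeDeleted) {
        System.out.println("ephemeral node deleted, processing expiration");
        deleted.countDown();
      }
    });

    server.close();                          // session ends -> ephemeral node removed
    deleted.await();
    observer.close();
  }
}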
2024-12-09T01:54:21,030 DEBUG [M:0;ef6f18c58dc9:33525 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T01:54:21,030 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709260047 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.large.0-1733709260047,5,FailOnTimeoutGroup] 2024-12-09T01:54:21,030 DEBUG [master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709260047 {}] cleaner.HFileCleaner(306): Exit Thread[master/ef6f18c58dc9:0:becomeActiveMaster-HFileCleaner.small.0-1733709260047,5,FailOnTimeoutGroup] 2024-12-09T01:54:21,030 INFO [M:0;ef6f18c58dc9:33525 {}] hbase.ChoreService(370): Chore service for: master/ef6f18c58dc9:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T01:54:21,030 INFO [M:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T01:54:21,030 DEBUG [M:0;ef6f18c58dc9:33525 {}] master.HMaster(1795): Stopping service threads 2024-12-09T01:54:21,030 INFO [M:0;ef6f18c58dc9:33525 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T01:54:21,031 INFO [M:0;ef6f18c58dc9:33525 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T01:54:21,031 INFO [M:0;ef6f18c58dc9:33525 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T01:54:21,031 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T01:54:21,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T01:54:21,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T01:54:21,033 DEBUG [M:0;ef6f18c58dc9:33525 {}] zookeeper.ZKUtil(347): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T01:54:21,033 WARN [M:0;ef6f18c58dc9:33525 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T01:54:21,033 INFO [M:0;ef6f18c58dc9:33525 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/.lastflushedseqids 2024-12-09T01:54:21,037 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.1733709043481 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-12-09T01:54:21,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741838_1014 (size=99) 2024-12-09T01:54:21,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741838_1014 (size=99) 2024-12-09T01:54:21,042 INFO [M:0;ef6f18c58dc9:33525 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T01:54:21,042 INFO [M:0;ef6f18c58dc9:33525 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T01:54:21,042 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T01:54:21,042 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:21,042 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:21,042 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T01:54:21,042 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
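The RecoverLeaseFSUtils warning above comes from WAL lease recovery running after the test's DFSClient has already been closed, so the reflectively invoked isFileClosed probe fails with "Filesystem closed". Outside that shutdown race, the underlying primitives are DistributedFileSystem.recoverLease and isFileClosed (the latter visible in the stack). A simplified sketch of the recover-then-poll loop follows; the real RecoverLeaseFSUtils is more involved, and the file path and poll interval here are hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  // Ask the NameNode to recover the lease on a WAL file, then poll until HDFS
  // reports the file closed.
  static void recoverLease(DistributedFileSystem dfs, Path wal)
      throws IOException, InterruptedException {
    boolean recovered = dfs.recoverLease(wal);          // may complete immediately
    while (!recovered && !dfs.isFileClosed(wal)) {      // isFileClosed is the call the log
      Thread.sleep(1000);                               // invokes reflectively
      recovered = dfs.recoverLease(wal);
    }
  }

  public static void main(String[] args) throws Exception {
    // Assumption: an HDFS cluster reachable via the default configuration.
    FileSystem fs = FileSystem.get(new Configuration());
    if (fs instanceof DistributedFileSystem) {
      recoverLease((DistributedFileSystem) fs,
          new Path("/user/jenkins/wals/example.wal"));  // hypothetical file
    }
  }
}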
2024-12-09T01:54:21,042 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-12-09T01:54:21,058 DEBUG [M:0;ef6f18c58dc9:33525 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dd50fc5b263a44ce800bc81d44db95f2 is 82, key is hbase:meta,,1/info:regioninfo/1733709260682/Put/seqid=0 2024-12-09T01:54:21,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741839_1015 (size=5672) 2024-12-09T01:54:21,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741839_1015 (size=5672) 2024-12-09T01:54:21,063 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dd50fc5b263a44ce800bc81d44db95f2 2024-12-09T01:54:21,089 DEBUG [M:0;ef6f18c58dc9:33525 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bf1dd10b1f03410b84249e418d4cc0ae is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1733709260700/Put/seqid=0 2024-12-09T01:54:21,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741840_1016 (size=5275) 2024-12-09T01:54:21,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741840_1016 (size=5275) 2024-12-09T01:54:21,094 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bf1dd10b1f03410b84249e418d4cc0ae 2024-12-09T01:54:21,111 DEBUG [M:0;ef6f18c58dc9:33525 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/32c90d6c581740b183ce385a8ed675ab is 69, key is ef6f18c58dc9,34649,1733709259851/rs:state/1733709260106/Put/seqid=0 2024-12-09T01:54:21,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741841_1017 (size=5156) 2024-12-09T01:54:21,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741841_1017 (size=5156) 2024-12-09T01:54:21,116 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/32c90d6c581740b183ce385a8ed675ab 2024-12-09T01:54:21,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:54:21,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34649-0x100747b9cea0001, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T01:54:21,128 INFO [RS:0;ef6f18c58dc9:34649 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T01:54:21,128 INFO [RS:0;ef6f18c58dc9:34649 {}] regionserver.HRegionServer(1031): Exiting; stopping=ef6f18c58dc9,34649,1733709259851; zookeeper connection closed. 2024-12-09T01:54:21,128 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@668fd192 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@668fd192 2024-12-09T01:54:21,129 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-09T01:54:21,141 DEBUG [M:0;ef6f18c58dc9:33525 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1bc8713a1c57456ab665077c17e6822d is 52, key is load_balancer_on/state:d/1733709260788/Put/seqid=0 2024-12-09T01:54:21,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741842_1018 (size=5056) 2024-12-09T01:54:21,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741842_1018 (size=5056) 2024-12-09T01:54:21,146 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1bc8713a1c57456ab665077c17e6822d 2024-12-09T01:54:21,150 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dd50fc5b263a44ce800bc81d44db95f2 as hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dd50fc5b263a44ce800bc81d44db95f2 2024-12-09T01:54:21,155 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dd50fc5b263a44ce800bc81d44db95f2, entries=8, sequenceid=29, filesize=5.5 K 2024-12-09T01:54:21,156 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/bf1dd10b1f03410b84249e418d4cc0ae as hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bf1dd10b1f03410b84249e418d4cc0ae 2024-12-09T01:54:21,161 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/bf1dd10b1f03410b84249e418d4cc0ae, entries=3, sequenceid=29, filesize=5.2 K 2024-12-09T01:54:21,162 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/32c90d6c581740b183ce385a8ed675ab as hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/32c90d6c581740b183ce385a8ed675ab 2024-12-09T01:54:21,167 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/32c90d6c581740b183ce385a8ed675ab, entries=1, sequenceid=29, filesize=5.0 K 2024-12-09T01:54:21,168 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1bc8713a1c57456ab665077c17e6822d as hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1bc8713a1c57456ab665077c17e6822d 2024-12-09T01:54:21,172 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45481/user/jenkins/test-data/6f7e0c76-1199-9d37-dbdf-cfad24d062a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1bc8713a1c57456ab665077c17e6822d, entries=1, sequenceid=29, filesize=4.9 K 2024-12-09T01:54:21,174 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=29, compaction requested=false 2024-12-09T01:54:21,175 INFO [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T01:54:21,175 DEBUG [M:0;ef6f18c58dc9:33525 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733709261042Disabling compacts and flushes for region at 1733709261042Disabling writes for close at 1733709261042Obtaining lock to block concurrent updates at 1733709261042Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733709261042Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1733709261043 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733709261043Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733709261043Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733709261058 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733709261058Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733709261068 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733709261088 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733709261088Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733709261098 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733709261111 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733709261111Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1733709261120 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1733709261141 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1733709261141Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a205e14: reopening flushed file at 1733709261149 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1745321d: reopening flushed file at 1733709261155 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@28c2ba01: reopening flushed file at 1733709261161 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@124b6ada: reopening flushed file at 1733709261167 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=29, compaction requested=false at 1733709261174 (+7 ms)Writing region close event to WAL at 1733709261175 (+1 ms)Closed at 1733709261175 2024-12-09T01:54:21,176 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,176 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,176 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,176 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,176 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T01:54:21,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44165 is added to blk_1073741830_1006 (size=10311) 2024-12-09T01:54:21,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38549 is added to blk_1073741830_1006 (size=10311) 2024-12-09T01:54:21,178 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T01:54:21,179 INFO [M:0;ef6f18c58dc9:33525 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T01:54:21,179 INFO [M:0;ef6f18c58dc9:33525 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33525 2024-12-09T01:54:21,179 INFO [M:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T01:54:21,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,36919,1733709043959/ef6f18c58dc9%2C36919%2C1733709043959.1733709044172 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T01:54:21,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T01:54:21,281 INFO [M:0;ef6f18c58dc9:33525 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-09T01:54:21,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33525-0x100747b9cea0000, quorum=127.0.0.1:57283, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-09T01:54:21,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@e818af2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T01:54:21,284 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@87b2e2b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T01:54:21,284 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T01:54:21,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@71465f4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T01:54:21,284 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10e33487{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/hadoop.log.dir/,STOPPED}
2024-12-09T01:54:21,285 WARN [BP-375238139-172.17.0.2-1733709259080 heartbeating to localhost/127.0.0.1:45481 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T01:54:21,285 WARN [BP-375238139-172.17.0.2-1733709259080 heartbeating to localhost/127.0.0.1:45481 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-375238139-172.17.0.2-1733709259080 (Datanode Uuid 177f7649-7d8d-4582-99eb-9f5047aeff42) service to localhost/127.0.0.1:45481
2024-12-09T01:54:21,285 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T01:54:21,285 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T01:54:21,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42375/user/jenkins/test-data/62cf9b76-4cc0-88f4-0222-f3daaba6998b/WALs/ef6f18c58dc9,45353,1733709043006/ef6f18c58dc9%2C45353%2C1733709043006.meta.1733709043846.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-12-09T01:54:21,286 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/data/data3/current/BP-375238139-172.17.0.2-1733709259080 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:54:21,286 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/data/data4/current/BP-375238139-172.17.0.2-1733709259080 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:54:21,286 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T01:54:21,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20042785{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-09T01:54:21,288 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@79ca80d8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T01:54:21,288 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T01:54:21,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5eeda3c1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T01:54:21,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186dc28{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/hadoop.log.dir/,STOPPED}
2024-12-09T01:54:21,290 WARN [BP-375238139-172.17.0.2-1733709259080 heartbeating to localhost/127.0.0.1:45481 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-09T01:54:21,290 WARN [BP-375238139-172.17.0.2-1733709259080 heartbeating to localhost/127.0.0.1:45481 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-375238139-172.17.0.2-1733709259080 (Datanode Uuid 7f5a0ab2-19e4-4897-93a9-a2a92e0a182b) service to localhost/127.0.0.1:45481
2024-12-09T01:54:21,290 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/data/data1/current/BP-375238139-172.17.0.2-1733709259080 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:54:21,290 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/cluster_20aeaa3d-8043-e7e0-d846-69496467b8d7/data/data2/current/BP-375238139-172.17.0.2-1733709259080 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-09T01:54:21,291 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-09T01:54:21,291 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-09T01:54:21,291 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-09T01:54:21,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@25e609c4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-09T01:54:21,296 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c411d10{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-09T01:54:21,296 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-09T01:54:21,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4dfd679e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-09T01:54:21,296 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34f5471c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82d5d08f-4ebe-e199-bf07-3d7ec0996a16/hadoop.log.dir/,STOPPED}
2024-12-09T01:54:21,304 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-09T01:54:21,318 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-09T01:54:21,328 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 230)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45481 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45481
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45481 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:45481
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:45481 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: ForkJoinPool-2-worker-6
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45481
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45481
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:45481
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=536 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=170 (was 170), ProcessCount=11 (was 11), AvailableMemoryMB=11494 (was 11516)